From 1523837a8596ef1e0f6decb393bf3b09539ff721 Mon Sep 17 00:00:00 2001 From: Ram Date: Wed, 3 Aug 2022 23:04:02 +0530 Subject: [PATCH 01/97] Update version to 2.12 Signed-off-by: Ram --- .travis.yml | 8 ++++---- Makefile | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index 66402ccc18..029a4b8d8c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,10 +12,10 @@ cache: - $HOME/.cache/go-build script: - | - if [ "${TRAVIS_BRANCH}" == "master" ]; then - export DOCKER_HUB_STORK_TAG=master - export DOCKER_HUB_STORK_TEST_TAG=latest - export DOCKER_HUB_CMD_EXECUTOR_TAG=master + if [ "${TRAVIS_BRANCH}" == "2.12" ]; then + export DOCKER_HUB_STORK_TAG="${TRAVIS_BRANCH}"-dev + export DOCKER_HUB_STORK_TEST_TAG="${TRAVIS_BRANCH}"-dev + export DOCKER_HUB_CMD_EXECUTOR_TAG="${TRAVIS_BRANCH}"-dev else export DOCKER_HUB_STORK_TAG=`git rev-parse --short HEAD` export DOCKER_HUB_STORK_TEST_TAG=`git rev-parse --short HEAD` diff --git a/Makefile b/Makefile index 994f972741..339856989d 100644 --- a/Makefile +++ b/Makefile @@ -29,7 +29,7 @@ ifeq ($(BUILD_TYPE),debug) BUILDFLAGS += -gcflags "-N -l" endif -RELEASE_VER := 2.7.0 +RELEASE_VER := 2.12.0 BASE_DIR := $(shell git rev-parse --show-toplevel) GIT_SHA := $(shell git rev-parse --short HEAD) BIN :=$(BASE_DIR)/bin From 7fa49804f6a58d62fc79df4ba123c2753c1b27b2 Mon Sep 17 00:00:00 2001 From: Priyanshu Pandey Date: Thu, 4 Aug 2022 18:22:26 -0600 Subject: [PATCH 02/97] PWX-26049: Vendor updated px-object-controller to fix cache initialization, delete error and multitenancy. Signed-off-by: Priyanshu Pandey --- go.mod | 2 +- go.sum | 4 +- .../pkg/controller/controller.go | 44 +++++++++++++++++-- .../pkg/controller/operation.go | 40 ++++++++++++++--- vendor/modules.txt | 2 +- 5 files changed, 79 insertions(+), 13 deletions(-) diff --git a/go.mod b/go.mod index 4a4b6ad47e..eb0141f9ef 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( github.com/openshift/client-go v0.0.0-20210112165513-ebc401615f47 github.com/pborman/uuid v1.2.0 github.com/portworx/kdmp v0.4.1-0.20220710173715-5d42efc7d149 - github.com/portworx/px-object-controller v0.0.0-20220727220448-306ddfd28652 + github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987 github.com/portworx/sched-ops v1.20.4-rc1.0.20220401024625-dbc61a336f65 github.com/portworx/torpedo v0.20.4-rc1.0.20210325154352-eb81b0cdd145 github.com/prometheus/client_golang v1.11.0 diff --git a/go.sum b/go.sum index 83fda5f772..2d111bbd31 100644 --- a/go.sum +++ b/go.sum @@ -1421,8 +1421,8 @@ github.com/portworx/kvdb v0.0.0-20200723230726-2734b7f40194/go.mod h1:Q8YyrNDvPp github.com/portworx/kvdb v0.0.0-20200929023115-b312c7519467 h1:jkqzdbOnejgSN5HG/FLt4enNrozWT/K+nlmaRm3P1II= github.com/portworx/kvdb v0.0.0-20200929023115-b312c7519467/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/px-backup-api v1.2.2-0.20210917042806-f2b0725444af/go.mod h1:3+gfGSSmuF1pO9qkOuKiLWpiTKDXpijSg4VNgluGUX0= -github.com/portworx/px-object-controller v0.0.0-20220727220448-306ddfd28652 h1:Kv8k3Zw4hVHcw1zGsWgDlIaG/gUUXLu47PWIz6CCF2g= -github.com/portworx/px-object-controller v0.0.0-20220727220448-306ddfd28652/go.mod h1:g3pw2lI2AjqAixUCRhaBdKTY98znsCPR7NGRrlpimVU= +github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987 h1:VNBTmIPjJRZ2QP64zdsrif3ELDHiMzoyNNX74VNHgZ8= +github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987/go.mod h1:g3pw2lI2AjqAixUCRhaBdKTY98znsCPR7NGRrlpimVU= github.com/portworx/pxc v0.33.0/go.mod 
h1:Tl7hf4K2CDr0XtxzM08sr9H/KsMhscjf9ydb+MnT0U4= github.com/portworx/sched-ops v1.20.4-rc1.0.20220714042759-8f183fe386ca h1:jrjwiQdqgDRsQZuiRDaWsbvx/z5t1icQPf7dgJOQUKE= github.com/portworx/sched-ops v1.20.4-rc1.0.20220714042759-8f183fe386ca/go.mod h1:0IQvado0rnmbRMORaCqCDrrzjBrX5sU+Sz2+vQwEsjM= diff --git a/vendor/github.com/portworx/px-object-controller/pkg/controller/controller.go b/vendor/github.com/portworx/px-object-controller/pkg/controller/controller.go index 2aa2ed3c9c..ec0096e8ca 100644 --- a/vendor/github.com/portworx/px-object-controller/pkg/controller/controller.go +++ b/vendor/github.com/portworx/px-object-controller/pkg/controller/controller.go @@ -19,6 +19,7 @@ import ( v1 "k8s.io/api/core/v1" k8s_errors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -144,6 +145,14 @@ func New(cfg *Config) (*Controller, error) { func (ctrl *Controller) Run(workers int, stopCh chan struct{}) { ctrl.objectFactory.Start(stopCh) + informers := []cache.InformerSynced{ctrl.accessListerSynced, ctrl.bucketListerSynced} + if !cache.WaitForCacheSync(stopCh, informers...) { + logrus.Errorf("Cannot sync caches") + return + } + + ctrl.loadCaches(ctrl.bucketLister, ctrl.accessLister) + for i := 0; i < workers; i++ { go wait.Until(ctrl.bucketWorker, 0, stopCh) go wait.Until(ctrl.accessWorker, 0, stopCh) @@ -235,9 +244,7 @@ func (ctrl *Controller) processBucket(ctx context.Context, key string) error { ctx = ctrl.setupContextFromValue(ctx, bucketclaim.Status.BackendType) logrus.WithContext(ctx).Infof("deleting bucketclaim %q with driver %s", key, bucketclaim.Status.BackendType) - ctrl.deleteBucket(ctx, bucketclaim) - - return nil + return ctrl.deleteBucket(ctx, bucketclaim) } // enqueueBucketClaimWork adds bucketclaim to given work queue. @@ -375,3 +382,34 @@ func (ctrl *Controller) enqueueAccessWork(obj interface{}) { ctrl.accessQueue.Add(objName) } } + +// loadCache fills all controller caches with initial data. 
+// without this, the caches will be empty and not be able to process +// any new requests when the controller is restarted +func (ctrl *Controller) loadCaches(bucketLister bucketlisters.PXBucketClaimLister, accessLister bucketlisters.PXBucketAccessLister) { + bucketList, err := bucketLister.List(labels.Everything()) + if err != nil { + logrus.Errorf("Controller can't initialize caches: %v", err) + return + } + for _, bucket := range bucketList { + bucketClone := bucket.DeepCopy() + if _, err = ctrl.storeBucketUpdate(bucketClone); err != nil { + logrus.Errorf("error updating bucket cache: %v", err) + } + } + + accessList, err := accessLister.List(labels.Everything()) + if err != nil { + logrus.Errorf("Controller can't initialize caches: %v", err) + return + } + for _, access := range accessList { + accessClone := access.DeepCopy() + if _, err = ctrl.storeAccessUpdate(accessClone); err != nil { + logrus.Errorf("error updating bucket access cache: %v", err) + } + } + + logrus.Info("controller initialized for PXBucketClaims and PXBucketAccesses") +} diff --git a/vendor/github.com/portworx/px-object-controller/pkg/controller/operation.go b/vendor/github.com/portworx/px-object-controller/pkg/controller/operation.go index 00f121a62f..de85351219 100644 --- a/vendor/github.com/portworx/px-object-controller/pkg/controller/operation.go +++ b/vendor/github.com/portworx/px-object-controller/pkg/controller/operation.go @@ -3,6 +3,7 @@ package controller import ( "context" "fmt" + "strconv" "github.com/libopenstorage/openstorage/api" "github.com/libopenstorage/openstorage/api/server/sdk" @@ -21,6 +22,7 @@ const ( commonObjectServiceKeyPrefix = "object.portworx.io/" backendTypeKey = commonObjectServiceKeyPrefix + "backend-type" endpointKey = commonObjectServiceKeyPrefix + "endpoint" + clearBucketKey = commonObjectServiceKeyPrefix + "clear-bucket" commonObjectServiceFinalizerKeyPrefix = "finalizers.object.portworx.io/" accessGrantedFinalizer = commonObjectServiceFinalizerKeyPrefix + "access-granted" @@ -57,16 +59,27 @@ func (ctrl *Controller) deleteBucket(ctx context.Context, pbc *crdv1alpha1.PXBuc return nil } + clearBucket := false + if clearBucketVal, ok := pbc.Annotations[clearBucketKey]; ok { + var err error + clearBucket, err = strconv.ParseBool(clearBucketVal) + if err != nil { + logrus.Errorf("invalid value %s for %s, defaulting to false: %v", clearBucketVal, clearBucketKey, err) + } + } + // Provisioned and deletionPolicy is delete. Delete the bucket here. 
_, err := ctrl.bucketClient.DeleteBucket(ctx, &api.BucketDeleteRequest{ - BucketId: pbc.Status.BucketID, - Region: pbc.Status.Region, - Endpoint: pbc.Status.Endpoint, + BucketId: pbc.Status.BucketID, + Region: pbc.Status.Region, + Endpoint: pbc.Status.Endpoint, + ClearBucket: clearBucket, }) if err != nil { errMsg := fmt.Sprintf("delete bucket %s failed: %v", pbc.Name, err) logrus.WithContext(ctx).Errorf(errMsg) ctrl.eventRecorder.Event(pbc, v1.EventTypeWarning, "DeleteBucketError", errMsg) + return err } err = ctrl.removeBucketFinalizers(ctx, pbc) @@ -108,6 +121,12 @@ func (ctrl *Controller) createBucket(ctx context.Context, pbc *crdv1alpha1.PXBuc pbc.Status.BackendType = pbclass.Parameters[backendTypeKey] pbc.Status.Endpoint = pbclass.Parameters[endpointKey] pbc.Finalizers = append(pbc.Finalizers, bucketProvisionedFinalizer) + if pbc.Annotations == nil { + pbc.Annotations = make(map[string]string) + } + if clearBucketVal, ok := pbclass.Parameters[clearBucketKey]; ok { + pbc.Annotations[clearBucketKey] = clearBucketVal + } pbc, err = ctrl.k8sBucketClient.ObjectV1alpha1().PXBucketClaims(pbc.Namespace).Update(ctx, pbc, metav1.UpdateOptions{}) if err != nil { ctrl.eventRecorder.Event(pbc, v1.EventTypeWarning, "CreateBucketError", fmt.Sprintf("failed to update bucket: %v", err)) @@ -147,8 +166,8 @@ func (ctrl *Controller) setupContextFromClass(ctx context.Context, pbclass *crdv return grpcserver.AddMetadataToContext(ctx, sdk.ContextDriverKey, backendTypeValue), nil } -func getAccountName(pbclass *crdv1alpha1.PXBucketClass) string { - return fmt.Sprintf("px-os-account-%v", pbclass.ObjectMeta.UID) +func getAccountName(namespace *v1.Namespace) string { + return fmt.Sprintf("px-os-account-%v", namespace.GetUID()) } func getCredentialsSecretName(pba *crdv1alpha1.PXBucketAccess) string { @@ -159,9 +178,18 @@ func getCredentialsSecretName(pba *crdv1alpha1.PXBucketAccess) string { } func (ctrl *Controller) createAccess(ctx context.Context, pba *crdv1alpha1.PXBucketAccess, pbclass *crdv1alpha1.PXBucketClass, bucketID string) error { + // Get namespace UID for multitenancy + namespace, err := ctrl.k8sClient.CoreV1().Namespaces().Get(ctx, pba.Namespace, metav1.GetOptions{}) + if err != nil { + errMsg := fmt.Sprintf("failed to get namespace during grant bucket access %s: %v", pba.Name, err) + logrus.WithContext(ctx).Errorf(errMsg) + ctrl.eventRecorder.Event(pba, v1.EventTypeWarning, "GrantAccessError", errMsg) + return err + } + resp, err := ctrl.bucketClient.AccessBucket(ctx, &api.BucketGrantAccessRequest{ BucketId: bucketID, - AccountName: getAccountName(pbclass), + AccountName: getAccountName(namespace), }) if err != nil { errMsg := fmt.Sprintf("create bucket access %s failed: %v", pba.Name, err) diff --git a/vendor/modules.txt b/vendor/modules.txt index 16ce1e126b..618e15ae55 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -717,7 +717,7 @@ github.com/portworx/kdmp/pkg/version github.com/portworx/kvdb github.com/portworx/kvdb/common github.com/portworx/kvdb/mem -# github.com/portworx/px-object-controller v0.0.0-20220727220448-306ddfd28652 +# github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987 ## explicit github.com/portworx/px-object-controller/client/apis/objectservice/v1alpha1 github.com/portworx/px-object-controller/client/clientset/versioned From 22d2b7d14f169cb626d5c018ea0426c850161873 Mon Sep 17 00:00:00 2001 From: Priyanshu Pandey Date: Wed, 10 Aug 2022 13:43:30 -0600 Subject: [PATCH 03/97] PWX-26225: Error in starting px-object-controller should not throw 
fatal error. Signed-off-by: Priyanshu Pandey --- cmd/stork/stork.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/stork/stork.go b/cmd/stork/stork.go index b685119de7..c36437cfa1 100644 --- a/cmd/stork/stork.go +++ b/cmd/stork/stork.go @@ -493,7 +493,7 @@ func runStork(mgr manager.Manager, d volume.Driver, recorder record.EventRecorde if c.Bool("px-object-controller") { objectController := &objectcontroller.ObjectController{} if err := objectController.Init(); err != nil { - log.Fatalf("Error initializing px-object-controller : %v", err) + log.Warnf("Error initializing px-object-controller : %v", err) } } if c.Bool("kdmp-controller") { From 0d688b8c27e78ac3f7850aa978b8a3e43cd9e042 Mon Sep 17 00:00:00 2001 From: Priyanshu Pandey Date: Wed, 17 Aug 2022 00:09:57 -0600 Subject: [PATCH 04/97] PWX-26330: Disable px-object-controller by default. Signed-off-by: Priyanshu Pandey --- cmd/stork/stork.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/stork/stork.go b/cmd/stork/stork.go index c36437cfa1..0c2457427f 100644 --- a/cmd/stork/stork.go +++ b/cmd/stork/stork.go @@ -143,7 +143,7 @@ func main() { Name: "application-controller", Usage: "Start the controllers for managing applications (default: true)", }, - cli.BoolTFlag{ + cli.BoolFlag{ Name: "px-object-controller", Usage: "Start the px object controller.", }, From b0facdca1bd0da0e82dc2fdc82ab6a0b03555a42 Mon Sep 17 00:00:00 2001 From: Ram Date: Mon, 29 Aug 2022 18:21:28 +0530 Subject: [PATCH 05/97] Force full backup on specified day in daily schedule Signed-off-by: Ram --- pkg/apis/stork/v1alpha1/schedulepolicy.go | 2 ++ .../controllers/applicationbackupschedule.go | 6 +++--- pkg/schedule/schedule.go | 13 ++++++++++++- pkg/utils/utils.go | 3 +++ 4 files changed, 20 insertions(+), 4 deletions(-) diff --git a/pkg/apis/stork/v1alpha1/schedulepolicy.go b/pkg/apis/stork/v1alpha1/schedulepolicy.go index f2282ed79a..9cbc77dbca 100644 --- a/pkg/apis/stork/v1alpha1/schedulepolicy.go +++ b/pkg/apis/stork/v1alpha1/schedulepolicy.go @@ -124,6 +124,8 @@ type DailyPolicy struct { // Options to be passed in to the driver. These will be passed in // to the object being triggered Options map[string]string `json:"options"` + // ForceFullSnapshotDay specifies day of the week for full snapshot to take place + ForceFullSnapshotDay string `json:"forceFullSnapshotDay"` } // GetHourMinute parses and return the hour and minute specified in the policy diff --git a/pkg/applicationmanager/controllers/applicationbackupschedule.go b/pkg/applicationmanager/controllers/applicationbackupschedule.go index 7b01192486..ad2615b9c3 100644 --- a/pkg/applicationmanager/controllers/applicationbackupschedule.go +++ b/pkg/applicationmanager/controllers/applicationbackupschedule.go @@ -14,6 +14,7 @@ import ( "github.com/libopenstorage/stork/pkg/log" "github.com/libopenstorage/stork/pkg/objectstore" "github.com/libopenstorage/stork/pkg/schedule" + "github.com/libopenstorage/stork/pkg/utils" "github.com/libopenstorage/stork/pkg/version" "github.com/portworx/sched-ops/k8s/apiextensions" storkops "github.com/portworx/sched-ops/k8s/stork" @@ -41,7 +42,6 @@ const ( // ApplicationBackupObjectLockRetentionAnnotation - object lock retention period annotation // Since this annotation is used in the px-backup, creating with portworx.io annotation prefix. 
ApplicationBackupObjectLockRetentionAnnotation = "portworx.io/" + "object-lock-retention-period" - incrementalCountAnnotation = "portworx.io/cloudsnap-incremental-count" dayInSec = 86400 //ObjectLockDefaultIncrementalCount default incremental backup count ObjectLockDefaultIncrementalCount = 5 @@ -453,8 +453,8 @@ func (s *ApplicationBackupScheduleController) startApplicationBackup(backupSched backupscheduleCreationTime, diff, elaspedDays, elaspedDaysInSecs, currentDayStartTime) if lastSuccessfulBackupCreateTime < currentDayStartTime { - // forcing it to be full backup, by setting the incrementalCountAnnotation to zero - backup.Spec.Options[incrementalCountAnnotation] = fmt.Sprintf("%v", 0) + // forcing it to be full backup, by setting the PXIncrementalCountAnnotation to zero + backup.Spec.Options[utils.PXIncrementalCountAnnotation] = fmt.Sprintf("%v", 0) } } } diff --git a/pkg/schedule/schedule.go b/pkg/schedule/schedule.go index 03ed3c7704..11651e1672 100644 --- a/pkg/schedule/schedule.go +++ b/pkg/schedule/schedule.go @@ -8,6 +8,7 @@ import ( stork_api "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" "github.com/libopenstorage/stork/pkg/k8sutils" + "github.com/libopenstorage/stork/pkg/utils" "github.com/libopenstorage/stork/pkg/version" "github.com/portworx/sched-ops/k8s/apiextensions" "github.com/portworx/sched-ops/k8s/core" @@ -229,7 +230,17 @@ func GetOptions(policyName string, namespace string, policyType stork_api.Schedu case stork_api.SchedulePolicyTypeInterval: return schedulePolicy.Policy.Interval.Options, nil case stork_api.SchedulePolicyTypeDaily: - return schedulePolicy.Policy.Daily.Options, nil + options := schedulePolicy.Policy.Daily.Options + scheduledDay, ok := stork_api.Days[schedulePolicy.Policy.Daily.ForceFullSnapshotDay] + if ok { + currentDay := GetCurrentTime().Weekday() + // force full backup on specified day + if currentDay == scheduledDay { + options[utils.PXIncrementalCountAnnotation] = "0" + } + logrus.Debugf("Forcing full-snapshot for daily snapshotschedule policy on the day %s", schedulePolicy.Policy.Daily.ForceFullSnapshotDay) + } + return options, nil case stork_api.SchedulePolicyTypeWeekly: return schedulePolicy.Policy.Weekly.Options, nil case stork_api.SchedulePolicyTypeMonthly: diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index e30de7a197..8a57f0930a 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -10,6 +10,9 @@ const ( CattlePrefix = "cattle.io" // CattleProjectPrefix is the prefix used in all Rancher project related annotations and labels CattleProjectPrefix = "cattle.io/projectId" + // PXIncrementalCountAnnotation is the annotation used to set cloud backup incremental count + // for volume + PXIncrementalCountAnnotation = "portworx.io/cloudsnap-incremental-count" ) // ParseKeyValueList parses a list of key=values string into a map From c488643959ad24bfc6c80b818bf909d4602853b0 Mon Sep 17 00:00:00 2001 From: Aditya Dani Date: Tue, 30 Aug 2022 09:36:57 -0700 Subject: [PATCH 06/97] Log when stork takes a forceful snapshot --- pkg/schedule/schedule.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/schedule/schedule.go b/pkg/schedule/schedule.go index 11651e1672..eab730e35e 100644 --- a/pkg/schedule/schedule.go +++ b/pkg/schedule/schedule.go @@ -238,7 +238,7 @@ func GetOptions(policyName string, namespace string, policyType stork_api.Schedu if currentDay == scheduledDay { options[utils.PXIncrementalCountAnnotation] = "0" } - logrus.Debugf("Forcing full-snapshot for daily snapshotschedule policy on the day 
%s", schedulePolicy.Policy.Daily.ForceFullSnapshotDay) + logrus.Infof("Forcing full-snapshot for daily snapshotschedule policy on the day %s", schedulePolicy.Policy.Daily.ForceFullSnapshotDay) } return options, nil case stork_api.SchedulePolicyTypeWeekly: From 096e5ca1f028387611c72f9c1fbaf77a2bcef5c6 Mon Sep 17 00:00:00 2001 From: Ram Date: Mon, 1 Aug 2022 11:49:50 +0530 Subject: [PATCH 07/97] PWX-26033: Dont include FA/FB device for migration Signed-off-by: Ram --- drivers/volume/portworx/portworx.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/volume/portworx/portworx.go b/drivers/volume/portworx/portworx.go index 307e981cfe..b0c6494124 100644 --- a/drivers/volume/portworx/portworx.go +++ b/drivers/volume/portworx/portworx.go @@ -2421,7 +2421,7 @@ func (p *portworx) StartMigration(migration *storkapi.Migration) ([]*storkapi.Mi return nil, fmt.Errorf("error getting list of volumes to migrate: %v", err) } for _, pvc := range pvcList.Items { - if !p.OwnsPVC(core.Instance(), &pvc) { + if !p.IsSupportedPVC(core.Instance(), &pvc, true) { continue } if resourcecollector.SkipResource(pvc.Annotations) { From 520c01043437ac2a8ca9fa6f942e16a12c9e5741 Mon Sep 17 00:00:00 2001 From: Ram Date: Mon, 1 Aug 2022 15:14:30 +0530 Subject: [PATCH 08/97] PWX-24976: Register ResourceTransformation CR api Signed-off-by: Ram --- pkg/apis/stork/v1alpha1/register.go | 2 + .../stork/v1alpha1/resourcetransformation.go | 135 ++++++++++++++++++ pkg/migration/controllers/clusterpair.go | 16 +-- 3 files changed, 145 insertions(+), 8 deletions(-) create mode 100644 pkg/apis/stork/v1alpha1/resourcetransformation.go diff --git a/pkg/apis/stork/v1alpha1/register.go b/pkg/apis/stork/v1alpha1/register.go index 408649670f..ffa01bad0a 100644 --- a/pkg/apis/stork/v1alpha1/register.go +++ b/pkg/apis/stork/v1alpha1/register.go @@ -66,6 +66,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ApplicationBackupScheduleList{}, &DataExport{}, &DataExportList{}, + &ResourceTransformation{}, + &ResourceTransformationList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/pkg/apis/stork/v1alpha1/resourcetransformation.go b/pkg/apis/stork/v1alpha1/resourcetransformation.go new file mode 100644 index 0000000000..813bfab7fb --- /dev/null +++ b/pkg/apis/stork/v1alpha1/resourcetransformation.go @@ -0,0 +1,135 @@ +package v1alpha1 + +import ( + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // ResourceTransformationResourceName is name for "ResourceTransformation" resource + ResourceTransformationResourceName = "resourcetransformation" + // ResourceTransformationResourcePlural is plural for "ResourceTransformation" resource + ResourceTransformationResourcePlural = "resourcetransformations" +) + +// ResourceTransformationOperationType is type of operation supported for +// resource transformation +type ResourceTransformationOperationType string + +const ( + // AddPathValue is used to add path+value in specified resource spec + // if path+value already exist this operation will override value + // at given path + AddResourcePath ResourceTransformationOperationType = "add" + // ModifyResourcePathValue is used to merge value at speficied resource path + // in case of a slice, entries will be appended. 
+ // in case of a keypair, entries will be merged + ModifyResourcePathValue ResourceTransformationOperationType = "modify" + // DeletePath from resource specification + DeleteResourcePath ResourceTransformationOperationType = "delete" + // JsonResourcePatch will patch json in given resource spec + JsonResourcePatch ResourceTransformationOperationType = "jsonpatch" +) + +// ResourceTransformationValueType is types of value supported on +// path in resource specs +type ResourceTransformationValueType string + +const ( + // IntResourceType is to update integer value to specified resource path + IntResourceType ResourceTransformationValueType = "int" + // StringResourceType is to update string value to specified resource path + StringResourceType ResourceTransformationValueType = "string" + // BoolResourceType is to update boolean value to specified resource path + BoolResourceType ResourceTransformationValueType = "bool" + // SliceResourceType is to update slice value to specified resource path + SliceResourceType ResourceTransformationValueType = "slice" + // KeyPairResourceType is to update keypair value to specified resource path + KeyPairResourceType ResourceTransformationValueType = "keypair" +) + +// ResourceTransformationStatsusType is status of resource transformation CR +type ResourceTransformationStatusType string + +const ( + // ResourceTransformationStatusInitial represents initial state of resource + // transformation CR + ResourceTransformationStatusInitial ResourceTransformationStatusType = "" + // ResourceTransformationStatusInProgress represents dry run in progress state + // of resource transformation + ResourceTransformationStatusInProgress ResourceTransformationStatusType = "InProgress" + // ResourceTransformationStatusReady represents ready state of resource + // transformation CR + ResourceTransformationStatusReady ResourceTransformationStatusType = "Ready" + // ResourceTransformationStatusFailed represents dry-run failed state of resource + // transformation CR + ResourceTransformationStatusFailed ResourceTransformationStatusType = "Failed" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceTransformation represents a ResourceTransformation CR object +type ResourceTransformation struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ResourceTransformationSpec `json:"specs"` + Status ResourceTransformationStatus `json:"status"` +} + +type ResourceTransformationStatus struct { + Status ResourceTransformationStatusType `json:"status"` + Resources []*TransformResourceInfo `json:"resources"` +} + +// TransformResourceInfo is the info of resources selected +// for transformation +type TransformResourceInfo struct { + Name string `json:"name"` + Namespace string `json:"namespace"` + meta.GroupVersionKind `json:",inline"` + Status ResourceTransformationStatusType `json:"status"` + Reason string `json:"reason"` +} + +// ResourceTransformationSpec is used to update k8s resources +//before migration/restore +type ResourceTransformationSpec struct { + Objects []TransformSpecs `json:"transformSpecs"` +} + +// TransformSpecs specifies the patch to update selected resource +// before migration/restore +type TransformSpecs struct { + // Resource is GroupVersionKind for k8s resources + // should be in format `group/version/kind" + Resource string `json:"resource"` + // Selectors label selector to filter out resource for + // patching + Selectors map[string]string `json:"selectors"` + // 
Paths collection of resource path to update + Paths []ResourcePaths `json:"paths"` +} + +// ResourcePaths specifies the patch to modify resource +// before migration/restore +type ResourcePaths struct { + // Path k8s resource for operation + Path string `json:"path"` + // Value for given k8s path + Value string `json:"value"` + // Type of value specified int/bool/string/slice/keypair + Type ResourceTransformationValueType `json:"type"` + // Operation to be performed on path + // add/modify/delete/replace/jsonPatch + Operation ResourceTransformationOperationType `json:"operation"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceTransformationList is a list of ResourceTransformations +type ResourceTransformationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ResourceTransformation `json:"items"` +} diff --git a/pkg/migration/controllers/clusterpair.go b/pkg/migration/controllers/clusterpair.go index 97f2f8f84c..c33b1f4a14 100644 --- a/pkg/migration/controllers/clusterpair.go +++ b/pkg/migration/controllers/clusterpair.go @@ -64,9 +64,9 @@ func (c *ClusterPairController) Init(mgr manager.Manager) error { func (c *ClusterPairController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { logrus.Tracef("Reconciling ClusterPair %s/%s", request.Namespace, request.Name) - // Fetch the ApplicationBackup instance - backup := &stork_api.ClusterPair{} - err := c.client.Get(context.TODO(), request.NamespacedName, backup) + // Fetch the ClusterPair instance + clusterPair := &stork_api.ClusterPair{} + err := c.client.Get(context.TODO(), request.NamespacedName, clusterPair) if err != nil { if errors.IsNotFound(err) { // Request object not found, could have been deleted after reconcile request. 
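
(For reference: a ResourceTransformation CR serialized against the types added above might look like the sketch below. The apiVersion is derived from stork's SchemeGroupVersion, the top-level "specs" key follows the Spec field's json tag, and all names, selectors, and values here are illustrative assumptions, not anything taken from this series.)

apiVersion: stork.libopenstorage.org/v1alpha1
kind: ResourceTransformation
metadata:
  name: mysql-transform        # hypothetical name
  namespace: mysql             # namespace whose resources get patched
specs:
  transformSpecs:
  - resource: apps/v1/Deployment   # group/version/kind format
    selectors:
      app: mysql
    paths:
    - path: spec.replicas
      value: "0"
      type: int
      operation: modify
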
@@ -78,13 +78,13 @@ func (c *ClusterPairController) Reconcile(ctx context.Context, request reconcile return reconcile.Result{RequeueAfter: controllers.DefaultRequeueError}, err } - if !controllers.ContainsFinalizer(backup, controllers.FinalizerCleanup) { - controllers.SetFinalizer(backup, controllers.FinalizerCleanup) - return reconcile.Result{Requeue: true}, c.client.Update(context.TODO(), backup) + if !controllers.ContainsFinalizer(clusterPair, controllers.FinalizerCleanup) { + controllers.SetFinalizer(clusterPair, controllers.FinalizerCleanup) + return reconcile.Result{Requeue: true}, c.client.Update(context.TODO(), clusterPair) } - if err = c.handle(context.TODO(), backup); err != nil { - logrus.Errorf("%s: %s/%s: %s", reflect.TypeOf(c), backup.Namespace, backup.Name, err) + if err = c.handle(context.TODO(), clusterPair); err != nil { + logrus.Errorf("%s: %s/%s: %s", reflect.TypeOf(c), clusterPair.Namespace, clusterPair.Name, err) return reconcile.Result{RequeueAfter: controllers.DefaultRequeueError}, err } From 90c3f2e57fb9676ecf025c87b499c6f8eed3967c Mon Sep 17 00:00:00 2001 From: Ram Date: Tue, 9 Aug 2022 18:20:24 +0530 Subject: [PATCH 09/97] Register and handle Resource Transformation events via controller - validate specs for resource transformation cr - apply patch on unstruct k8s objects - run patched resources on dry run namespace with DryRun option set to all Signed-off-by: Ram --- pkg/log/log.go | 12 + pkg/migration/controllers/migration.go | 6 +- .../controllers/resourcetransformation.go | 322 ++++++++++++++++++ pkg/migration/migration.go | 3 +- pkg/resourcecollector/resourcecollector.go | 10 +- 5 files changed, 349 insertions(+), 4 deletions(-) create mode 100644 pkg/migration/controllers/resourcetransformation.go diff --git a/pkg/log/log.go b/pkg/log/log.go index cd3b938364..5f0f9da817 100644 --- a/pkg/log/log.go +++ b/pkg/log/log.go @@ -157,6 +157,18 @@ func MigrationLog(migration *storkv1.Migration) *logrus.Entry { return logrus.WithFields(logrus.Fields{}) } +// TransformLog formats a log message with resource transformation CR information +func TransformLog(transform *storkv1.ResourceTransformation) *logrus.Entry { + if transform != nil { + return logrus.WithFields(logrus.Fields{ + "ResourceTransformationName": transform.Name, + "ResourceTransformationNamespace": transform.Namespace, + }) + } + + return logrus.WithFields(logrus.Fields{}) +} + // MigrationScheduleLog formats a log message with migrationschedule information func MigrationScheduleLog(migrationSchedule *storkv1.MigrationSchedule) *logrus.Entry { if migrationSchedule != nil { diff --git a/pkg/migration/controllers/migration.go b/pkg/migration/controllers/migration.go index 05804f7cb7..56848284d9 100644 --- a/pkg/migration/controllers/migration.go +++ b/pkg/migration/controllers/migration.go @@ -3,13 +3,14 @@ package controllers import ( "context" "fmt" - "github.com/libopenstorage/stork/pkg/utils" "math/rand" "reflect" "strconv" "strings" "time" + "github.com/libopenstorage/stork/pkg/utils" + "github.com/go-openapi/inflect" "github.com/libopenstorage/stork/drivers/volume" stork_api "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" @@ -65,7 +66,8 @@ const ( PVReclaimAnnotation = "stork.libopenstorage.org/reclaimPolicy" // StorkAnnotationPrefix for resources created/managed by stork StorkAnnotationPrefix = "stork.libopenstorage.org/" - + // StorkNamespacePrefix for namespace created for applying dry run resources + StorkNamespacePrefix = "stork-transform" // Max number of times to retry applying 
resources on the destination
 	maxApplyRetries = 10
 	deletedMaxRetries = 12
diff --git a/pkg/migration/controllers/resourcetransformation.go b/pkg/migration/controllers/resourcetransformation.go
new file mode 100644
index 0000000000..353df8ebd1
--- /dev/null
+++ b/pkg/migration/controllers/resourcetransformation.go
@@ -0,0 +1,322 @@
+package controllers
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/go-openapi/inflect"
+	"github.com/libopenstorage/stork/drivers/volume"
+	stork_api "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1"
+	"github.com/libopenstorage/stork/pkg/controllers"
+	"github.com/libopenstorage/stork/pkg/k8sutils"
+	"github.com/libopenstorage/stork/pkg/log"
+	"github.com/libopenstorage/stork/pkg/resourcecollector"
+	"github.com/libopenstorage/stork/pkg/version"
+	"github.com/portworx/sched-ops/k8s/apiextensions"
+	"github.com/portworx/sched-ops/k8s/core"
+	"github.com/sirupsen/logrus"
+	v1 "k8s.io/api/core/v1"
+	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/client-go/tools/record"
+	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+const (
+	resourceTransformationController = "resource-transformation-controller"
+)
+
+// NewResourceTransformation creates a new instance of ResourceTransformationController
+func NewResourceTransformation(mgr manager.Manager, d volume.Driver, r record.EventRecorder, rc resourcecollector.ResourceCollector) *ResourceTransformationController {
+	return &ResourceTransformationController{
+		client:            mgr.GetClient(),
+		recorder:          r,
+		resourceCollector: rc,
+	}
+}
+
+// ResourceTransformationController is the controller that watches over ResourceTransformation CRs
+type ResourceTransformationController struct {
+	client runtimeclient.Client
+
+	resourceCollector resourcecollector.ResourceCollector
+	recorder          record.EventRecorder
+}
+
+// Init initializes the resource transformation controller
+func (r *ResourceTransformationController) Init(mgr manager.Manager) error {
+	err := r.createCRD()
+	if err != nil {
+		return err
+	}
+
+	return controllers.RegisterTo(mgr, resourceTransformationController, r, &stork_api.ResourceTransformation{})
+}
+
+// Reconcile manages ResourceTransformation resources.
+func (r *ResourceTransformationController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
+	logrus.Infof("Reconciling ResourceTransformation %s/%s", request.Namespace, request.Name)
+
+	resourceTransformation := &stork_api.ResourceTransformation{}
+	err := r.client.Get(context.TODO(), request.NamespacedName, resourceTransformation)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			// Request object not found, could have been deleted after reconcile request.
+			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
+			// Return and don't requeue
+			return reconcile.Result{}, nil
+		}
+		// Error reading the object - requeue the request.
+ return reconcile.Result{RequeueAfter: controllers.DefaultRequeueError}, err + } + + if !controllers.ContainsFinalizer(resourceTransformation, controllers.FinalizerCleanup) { + controllers.SetFinalizer(resourceTransformation, controllers.FinalizerCleanup) + return reconcile.Result{Requeue: true}, r.client.Update(context.TODO(), resourceTransformation) + } + + if err = r.handle(context.TODO(), resourceTransformation); err != nil { + logrus.Errorf("%s: %s/%s: %s", reflect.TypeOf(r), resourceTransformation.Namespace, resourceTransformation.Name, err) + return reconcile.Result{RequeueAfter: controllers.DefaultRequeueError}, err + } + + return reconcile.Result{RequeueAfter: controllers.DefaultRequeue}, nil +} + +func getTransformNamespace(ns string) string { + return StorkNamespacePrefix + "-" + ns +} +func (r *ResourceTransformationController) handle(ctx context.Context, transform *stork_api.ResourceTransformation) error { + var err error + if transform.DeletionTimestamp != nil { + if transform.GetFinalizers() != nil { + controllers.RemoveFinalizer(transform, controllers.FinalizerCleanup) + return r.client.Update(ctx, transform) + } + + return nil + } + switch transform.Status.Status { + case stork_api.ResourceTransformationStatusInitial: + ns := &v1.Namespace{} + ns.Name = getTransformNamespace(transform.Namespace) + _, err := core.Instance().CreateNamespace(ns) + if err != nil { + message := fmt.Sprintf("Unable to create resource transformation namespace: %v", err) + log.TransformLog(transform).Errorf(message) + r.recorder.Event(transform, + v1.EventTypeWarning, + string(stork_api.ResourceTransformationStatusFailed), + message) + transform.Status.Status = stork_api.ResourceTransformationStatusFailed + err := r.client.Update(ctx, transform) + if err != nil { + return err + } + return nil + } + err = r.validateSpecPath(transform) + if err != nil { + message := fmt.Sprintf("Unsupported resource for resource transformation found: %v", err) + log.TransformLog(transform).Errorf(message) + r.recorder.Event(transform, + v1.EventTypeWarning, + string(stork_api.ResourceTransformationStatusFailed), + message) + transform.Status.Status = stork_api.ResourceTransformationStatusFailed + err := r.client.Update(ctx, transform) + if err != nil { + return err + } + return nil + } + case stork_api.ResourceTransformationStatusInProgress: + err = r.validateTransformResource(ctx, transform) + if err != nil { + message := fmt.Sprintf("Error validating resource transformation specs: %v", err) + log.TransformLog(transform).Errorf(message) + r.recorder.Event(transform, + v1.EventTypeWarning, + string(stork_api.ResourceTransformationStatusFailed), + message) + transform.Status.Status = stork_api.ResourceTransformationStatusFailed + err := r.client.Update(ctx, transform) + if err != nil { + return err + } + } + case stork_api.ResourceTransformationStatusReady: + case stork_api.ResourceTransformationStatusFailed: + return nil + default: + log.TransformLog(transform).Errorf("Invalid status for ResourceTransformation: %v", transform.Status.Status) + } + return nil +} + +func (r *ResourceTransformationController) validateSpecPath(transform *stork_api.ResourceTransformation) error { + for _, spec := range transform.Spec.Objects { + _, _, kind, err := getGVK(spec.Resource) + if err != nil { + return err + } + if !resourcecollector.GetSupportedK8SResources(kind, []string{}) { + return fmt.Errorf("unsupported resource kind for transformation: %s", kind) + } + for _, path := range spec.Paths { + // TODO: this can be validated 
via CRDs as well, when we have defined schema
+			// for stork crds
+			// https://portworx.atlassian.net/browse/PWX-26465
+			if path.Operation == stork_api.JsonResourcePatch {
+				return fmt.Errorf("json patch for resources is not supported, operation: %s", path.Operation)
+			}
+			if !(path.Operation == stork_api.AddResourcePath || path.Operation == stork_api.DeleteResourcePath) {
+				return fmt.Errorf("unsupported resource patch operation given for kind: %s, operation: %s", kind, path.Operation)
+			}
+			if !(path.Type == stork_api.BoolResourceType || path.Type == stork_api.IntResourceType ||
+				path.Type == stork_api.StringResourceType || path.Type == stork_api.SliceResourceType ||
+				path.Type == stork_api.KeyPairResourceType) {
+				return fmt.Errorf("unsupported type for resource %s, path %s, type: %s", kind, path.Path, path.Type)
+			}
+		}
+	}
+	return nil
+}
+
+func (r *ResourceTransformationController) validateTransformResource(ctx context.Context, transform *stork_api.ResourceTransformation) error {
+	resourceCollectorOpts := resourcecollector.Options{}
+	for _, spec := range transform.Spec.Objects {
+		group, version, kind, err := getGVK(spec.Resource)
+		if err != nil {
+			return fmt.Errorf("invalid resource type, should be in format group/version/kind, actual: %s", spec.Resource)
+		}
+		resource := metav1.APIResource{
+			Name:       strings.ToLower(inflect.Pluralize(kind)),
+			Kind:       kind,
+			Version:    version,
+			Namespaced: true,
+			Group:      group,
+		}
+		log.TransformLog(transform).Infof("querying resource: %v", resource)
+		objects, err := r.resourceCollector.GetResourcesForType(
+			resource,
+			nil,
+			[]string{transform.Namespace},
+			spec.Selectors,
+			nil,
+			false,
+			resourceCollectorOpts,
+		)
+		if err != nil {
+			r.recorder.Event(transform,
+				v1.EventTypeWarning,
+				string(stork_api.ResourceTransformationStatusFailed),
+				fmt.Sprintf("Error getting resource kind:%s, err: %v", kind, err))
+			log.TransformLog(transform).Errorf("Error getting resources kind:%s, err: %v", kind, err)
+			return err
+		}
+		for _, path := range spec.Paths {
+			if !(path.Operation == stork_api.AddResourcePath || path.Operation == stork_api.DeleteResourcePath) {
+				return fmt.Errorf("unsupported operation type for given path: %s", path.Operation)
+			}
+			for _, object := range objects.Items {
+				content := object.UnstructuredContent()
+				metadata, err := meta.Accessor(object)
+				if err != nil {
+					log.TransformLog(transform).Errorf("Unable to read metadata for resource %v, err: %v", kind, err)
+					return err
+				}
+				resInfo := &stork_api.TransformResourceInfo{
+					Name:             metadata.GetName(),
+					Namespace:        metadata.GetNamespace(),
+					GroupVersionKind: metav1.GroupVersionKind(object.GetObjectKind().GroupVersionKind()),
+				}
+				err = unstructured.SetNestedField(content, path.Value, strings.Split(path.Path, ".")...)
+ if err != nil { + log.TransformLog(transform).Errorf("Unable to apply patch path %s on resource kind: %s/,%s/%s, err: %v", path, kind, resInfo.Namespace, resInfo.Name, err) + resInfo.Status = stork_api.ResourceTransformationStatusFailed + resInfo.Reason = err.Error() + } + unstructured, ok := object.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unable to cast object to unstructured: %v", object) + } + // TODO: we can pass in remote config and dry run on remote cluster as well + localconfig, err := clientcmd.BuildConfigFromFlags("", "") + if err != nil { + return err + } + localInterface, err := dynamic.NewForConfig(localconfig) + if err != nil { + return err + } + resource := &metav1.APIResource{ + Name: inflect.Pluralize(strings.ToLower(kind)), + Namespaced: len(metadata.GetNamespace()) > 0, + } + dynamicClient := localInterface.Resource( + object.GetObjectKind().GroupVersionKind().GroupVersion().WithResource(resource.Name)).Namespace(metadata.GetNamespace()) + + unstructured.SetNamespace(getTransformNamespace(transform.Namespace)) + log.TransformLog(transform).Infof("Applying %v %v", object.GetObjectKind(), metadata.GetName()) + _, err = dynamicClient.Create(context.TODO(), unstructured, metav1.CreateOptions{DryRun: []string{"All"}}) + if err != nil { + log.TransformLog(transform).Errorf("Unable to apply patch path %s on resource kind: %s/,%s/%s, err: %v", path, kind, resInfo.Namespace, resInfo.Name, err) + resInfo.Status = stork_api.ResourceTransformationStatusFailed + resInfo.Reason = err.Error() + } else { + log.TransformLog(transform).Infof("Applied patch path %s on resource kind: %s/,%s/%s", path, kind, resInfo.Namespace, resInfo.Name) + resInfo.Status = stork_api.ResourceTransformationStatusReady + resInfo.Reason = "" + } + transform.Status.Resources = append(transform.Status.Resources, resInfo) + } + } + } + transform.Status.Status = stork_api.ResourceTransformationStatusReady + return r.client.Update(ctx, transform) +} + +func getGVK(resource string) (string, string, string, error) { + gvk := strings.Split(resource, "/") + if len(gvk) != 3 { + return "", "", "", fmt.Errorf("invalid resource kind :%s", resource) + } + return gvk[0], gvk[1], gvk[2], nil +} + +func (c *ResourceTransformationController) createCRD() error { + resource := apiextensions.CustomResource{ + Name: stork_api.ResourceTransformationResourceName, + Plural: stork_api.ResourceTransformationResourcePlural, + Group: stork_api.SchemeGroupVersion.Group, + Version: stork_api.SchemeGroupVersion.Version, + Scope: apiextensionsv1beta1.NamespaceScoped, + Kind: reflect.TypeOf(stork_api.ResourceTransformation{}).Name(), + } + ok, err := version.RequiresV1Registration() + if err != nil { + return err + } + if ok { + err := k8sutils.CreateCRD(resource) + if err != nil && !errors.IsAlreadyExists(err) { + return err + } + return apiextensions.Instance().ValidateCRD(resource.Plural+"."+resource.Group, validateCRDTimeout, validateCRDInterval) + } + err = apiextensions.Instance().CreateCRDV1beta1(resource) + if err != nil && !errors.IsAlreadyExists(err) { + return err + } + return apiextensions.Instance().ValidateCRDV1beta1(resource, validateCRDTimeout, validateCRDInterval) +} diff --git a/pkg/migration/migration.go b/pkg/migration/migration.go index b63adf001d..ddd20826c1 100644 --- a/pkg/migration/migration.go +++ b/pkg/migration/migration.go @@ -39,5 +39,6 @@ func (m *Migration) Init(mgr manager.Manager, migrationAdminNamespace string, mi if err != nil { return fmt.Errorf("error initializing migration schedule 
controller: %v", err) } - return nil + rt := controllers.NewResourceTransformation(mgr, m.Driver, m.Recorder, m.ResourceCollector) + return rt.Init(mgr) } diff --git a/pkg/resourcecollector/resourcecollector.go b/pkg/resourcecollector/resourcecollector.go index 4ee0003d0a..e8a9369e2c 100644 --- a/pkg/resourcecollector/resourcecollector.go +++ b/pkg/resourcecollector/resourcecollector.go @@ -148,7 +148,15 @@ func resourceToBeCollected(resource metav1.APIResource, grp schema.GroupVersion, return true } } - switch resource.Kind { + + return GetSupportedK8SResources(resource.Kind, optionalResourceTypes) +} + +// GetSupportedK8SResources returns supported k8s resources by resource collector +// pkgs, this can be used to validate list of resources supported by different stork +// controller like migration, backup, clone etc +func GetSupportedK8SResources(kind string, optionalResourceTypes []string) bool { + switch kind { case "PersistentVolumeClaim", "PersistentVolume", "Deployment", From 8a6c5026a251f7d9889459ef7d4f6f856594f87f Mon Sep 17 00:00:00 2001 From: Ram Date: Mon, 1 Aug 2022 22:11:18 +0530 Subject: [PATCH 10/97] PWX-24851: Enhance UX experience for setting up clusterpair for async-dr setups - query cluster pair token using px endpoint + port - query port by looking at px-api service rest port Signed-off-by: Ram --- pkg/storkctl/clusterpair.go | 92 ++++++++++++++++++++++++++++++++++--- 1 file changed, 86 insertions(+), 6 deletions(-) diff --git a/pkg/storkctl/clusterpair.go b/pkg/storkctl/clusterpair.go index 64043a483c..1073761e75 100644 --- a/pkg/storkctl/clusterpair.go +++ b/pkg/storkctl/clusterpair.go @@ -3,13 +3,18 @@ package storkctl import ( "bufio" "fmt" - "github.com/libopenstorage/stork/pkg/utils" "io/ioutil" + "net" "os" "reflect" + "strconv" "strings" + "github.com/libopenstorage/stork/pkg/utils" + storkv1 "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" + "github.com/libopenstorage/stork/pkg/utils" + "github.com/portworx/sched-ops/k8s/core" storkops "github.com/portworx/sched-ops/k8s/stork" "github.com/spf13/cobra" "k8s.io/apimachinery/pkg/api/validation" @@ -248,14 +253,22 @@ func newCreateClusterPairCommand(cmdFactory Factory, ioStreams genericclioptions util.CheckErr(err) return } - - srcClusterPair, err := generateClusterPair(clusterPairName, cmdFactory.GetNamespace(), dIP, dPort, destToken, dFile, projectMappingsStr, false) + printMsg("Using PX-Service Endpoint of DR cluster to create clusterpair...\n", ioStreams.Out) + ip, port, token, err := getClusterPairParams(dFile, dIP) if err != nil { + err := fmt.Errorf("unable to create clusterpair from source to DR cluster. 
Err: %v", err) util.CheckErr(err) return } + dIP = ip + if dPort == "" { + dPort = port + } + if destToken == "" { + destToken = token + } - destClusterPair, err := generateClusterPair(clusterPairName, cmdFactory.GetNamespace(), sIP, sPort, srcToken, sFile, projectMappingsStr, true) + srcClusterPair, err := generateClusterPair(clusterPairName, cmdFactory.GetNamespace(), dIP, dPort, destToken, dFile, projectMappingsStr, false) if err != nil { util.CheckErr(err) return @@ -274,6 +287,28 @@ func newCreateClusterPairCommand(cmdFactory Factory, ioStreams genericclioptions return } printMsg("ClusterPair "+clusterPairName+" created successfully on source cluster", ioStreams.Out) + if sFile == "" { + return + } + printMsg("Using PX-Service endpoints of source cluster to create clusterpair...\n", ioStreams.Out) + ip, port, token, err = getClusterPairParams(sFile, sIP) + if err != nil { + err := fmt.Errorf("unable to create clusterpair from DR to source cluster. Err: %v", err) + util.CheckErr(err) + return + } + sIP = ip + if sPort == "" { + sPort = port + } + if srcToken == "" { + srcToken = token + } + destClusterPair, err := generateClusterPair(clusterPairName, cmdFactory.GetNamespace(), sIP, sPort, srcToken, sFile, projectMappingsStr, true) + if err != nil { + util.CheckErr(err) + return + } // Create cluster-pair on dest cluster conf, err = getConfig(dFile).ClientConfig() if err != nil { @@ -297,8 +332,8 @@ func newCreateClusterPairCommand(cmdFactory Factory, ioStreams genericclioptions createClusterPairCommand.Flags().StringVarP(&dIP, "dest-ip", "", "", "kube-config of destination cluster") createClusterPairCommand.Flags().StringVarP(&dPort, "dest-port", "", "9001", "port of storage node from destination cluster") createClusterPairCommand.Flags().StringVarP(&dFile, "dest-kube-file", "", "", "kube-config of destination cluster") - createClusterPairCommand.Flags().StringVarP(&srcToken, "src-token", "", "", "source cluster token for cluster pairing") - createClusterPairCommand.Flags().StringVarP(&destToken, "dest-token", "", "", "destination cluster token for cluster pairing") + createClusterPairCommand.Flags().StringVarP(&srcToken, "src-token", "", "", "(optional)source cluster token for cluster pairing") + createClusterPairCommand.Flags().StringVarP(&destToken, "dest-token", "", "", "(optional)destination cluster token for cluster pairing") createClusterPairCommand.Flags().StringVarP(&projectMappingsStr, "project-mappings", "", "", "project mappings between source and destination clusters, use comma-separated = pairs (Currently supported only for Rancher)") @@ -436,3 +471,48 @@ func getConfig(configFile string) clientcmd.ClientConfig { configOverrides := &clientcmd.ConfigOverrides{} return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(configLoadingRules, configOverrides) } + +func getClusterPairParams(config, endpoint string) (string, string, string, error) { + var ip, port, token string + client, err := core.NewInstanceFromConfigFile(config) + if err != nil { + return ip, port, token, err + } + + services, err := client.ListServices("", meta.ListOptions{LabelSelector: "name=portworx-api"}) + if err != nil || len(services.Items) == 0 { + err := fmt.Errorf("unable to retrieve portworx-api service from DR cluster. 
Err: %v", err) + return ip, port, token, err + } + // TODO: in case of setting up aync-dr over cloud, + // users set up different service as load-balancer over px apis + // accept px-service name as env variable + svc := services.Items[0] + ip = endpoint + if ip == "" { + // this works only if px service is converted as load balancer type + // TODO: for 2 cluster where worker nodes are reachable, figure out + // any one worker ip by looking at px/enabled label + ip = svc.Spec.LoadBalancerIP + } + pxToken := os.Getenv("PX_AUTH_TOKEN") + for _, svcPort := range svc.Spec.Ports { + if svcPort.Name == "px-api" { + port = strconv.Itoa(int(svcPort.Port)) + break + } + } + pxEndpoint := net.JoinHostPort(ip, port) + // TODO: support https as well + clnt, err := clusterclient.NewAuthClusterClient("http://"+pxEndpoint, "v1", pxToken, "") + if err != nil { + return ip, port, token, err + } + mgr := clusterclient.ClusterManager(clnt) + resp, err := mgr.GetPairToken(false) + if err != nil { + return ip, port, token, err + } + token = resp.GetToken() + return ip, port, token, nil +} From d0bade8c1bfdb3197e328a14de283378c60c01e7 Mon Sep 17 00:00:00 2001 From: Ram Date: Thu, 18 Aug 2022 19:54:48 +0530 Subject: [PATCH 11/97] pwx-24979: integrate transform resource api with migration path - accept resource transformation in migration spec - update resource as per transformation rule Signed-off-by: Ram --- pkg/apis/stork/v1alpha1/migration.go | 1 + .../stork/v1alpha1/resourcetransformation.go | 11 ++++ pkg/migration/controllers/migration.go | 54 +++++++++++++++++-- 3 files changed, 62 insertions(+), 4 deletions(-) diff --git a/pkg/apis/stork/v1alpha1/migration.go b/pkg/apis/stork/v1alpha1/migration.go index e3ec6cab72..9067450f13 100644 --- a/pkg/apis/stork/v1alpha1/migration.go +++ b/pkg/apis/stork/v1alpha1/migration.go @@ -27,6 +27,7 @@ type MigrationSpec struct { PostExecRule string `json:"postExecRule"` IncludeOptionalResourceTypes []string `json:"includeOptionalResourceTypes"` SkipDeletedNamespaces *bool `json:"skipDeletedNamespaces"` + UpdateResourceSpecs string `json:"updateResourceSpecs"` } // MigrationStatus is the status of a migration operation diff --git a/pkg/apis/stork/v1alpha1/resourcetransformation.go b/pkg/apis/stork/v1alpha1/resourcetransformation.go index 813bfab7fb..b6abb33431 100644 --- a/pkg/apis/stork/v1alpha1/resourcetransformation.go +++ b/pkg/apis/stork/v1alpha1/resourcetransformation.go @@ -35,6 +35,8 @@ const ( // path in resource specs type ResourceTransformationValueType string +type KindResourceTransform map[string][]TransformResourceInfo + const ( // IntResourceType is to update integer value to specified resource path IntResourceType ResourceTransformationValueType = "int" @@ -90,6 +92,7 @@ type TransformResourceInfo struct { meta.GroupVersionKind `json:",inline"` Status ResourceTransformationStatusType `json:"status"` Reason string `json:"reason"` + Specs TransformSpecs `json:"specs"` } // ResourceTransformationSpec is used to update k8s resources @@ -111,6 +114,14 @@ type TransformSpecs struct { Paths []ResourcePaths `json:"paths"` } +type TransformSpecPatch struct { + GVK map[string]PatchStruct +} +type PatchStruct struct { + // namespace - resource in namespace + Resources map[string]TransformResourceInfo +} + // ResourcePaths specifies the patch to modify resource // before migration/restore type ResourcePaths struct { diff --git a/pkg/migration/controllers/migration.go b/pkg/migration/controllers/migration.go index 56848284d9..ecc821e241 100644 --- 
a/pkg/migration/controllers/migration.go
+++ b/pkg/migration/controllers/migration.go
@@ -366,7 +366,37 @@ func (m *MigrationController) handle(ctx context.Context, migration *stork_api.M
 				err.Error())
 			err = m.updateMigrationCR(context.Background(), migration)
 			if err != nil {
-				log.MigrationLog(migration).Errorf("Error updating")
+				log.MigrationLog(migration).Errorf("Error updating CR, err: %v", err)
+			}
+			return nil
+		}
+	}
+	// Make sure the transformation CR is in ready state
+	if migration.Spec.UpdateResourceSpecs != "" {
+		resp, err := storkops.Instance().GetResourceTransformation(migration.Spec.UpdateResourceSpecs, ns)
+		if err != nil && !errors.IsNotFound(err) {
+			errMsg := fmt.Sprintf("unable to retrieve transformation %s, err: %v", migration.Spec.UpdateResourceSpecs, err)
+			log.MigrationLog(migration).Errorf(errMsg)
+			m.recorder.Event(migration,
+				v1.EventTypeWarning,
+				string(stork_api.MigrationStatusFailed),
+				errMsg)
+			err = m.updateMigrationCR(context.Background(), migration)
+			if err != nil {
+				log.MigrationLog(migration).Errorf("Error updating CR, err: %v", err)
+			}
+			return nil
+		}
+		if resp != nil && resp.Status.Status != stork_api.ResourceTransformationStatusReady {
+			errMsg := fmt.Sprintf("transformation %s is not in ready state: %s", migration.Spec.UpdateResourceSpecs, resp.Status.Status)
+			log.MigrationLog(migration).Errorf(errMsg)
+			m.recorder.Event(migration,
+				v1.EventTypeWarning,
+				string(stork_api.MigrationStatusFailed),
+				errMsg)
+			err = m.updateMigrationCR(context.Background(), migration)
+			if err != nil {
+				log.MigrationLog(migration).Errorf("Error updating CR, err: %v", err)
 			}
 			return nil
 		}
 	}
@@ -995,7 +1025,11 @@ func (m *MigrationController) prepareResources(
 	if err != nil {
 		return err
 	}
-
+	resPatch, err := resourcecollector.GetResourcePatch(migration.Spec.UpdateResourceSpecs, migration.Spec.Namespaces)
+	if err != nil {
+		log.MigrationLog(migration).
+			Warnf("Unable to get transformation spec from %s, skipping transformation for this migration, err: %v", migration.Spec.UpdateResourceSpecs, err)
+	}
 	for _, o := range objects {
 		metadata, err := meta.Accessor(o)
 		if err != nil {
@@ -1009,7 +1043,7 @@ func (m *MigrationController) prepareResources(
 				return fmt.Errorf("error preparing PV resource %v: %v", metadata.GetName(), err)
 			}
 		case "Deployment", "StatefulSet", "DeploymentConfig", "IBPPeer", "IBPCA", "IBPConsole", "IBPOrderer", "ReplicaSet":
-			err := m.prepareApplicationResource(migration, clusterPair, o)
+			err := m.prepareApplicationResource(migration, clusterPair, o, resPatch[metadata.GetNamespace()][resource.Kind])
 			if err != nil {
 				return fmt.Errorf("error preparing %v resource %v: %v", o.GetObjectKind().GroupVersionKind().Kind, metadata.GetName(), err)
 			}
@@ -1018,6 +1052,18 @@ func (m *MigrationController) prepareResources(
 			if err != nil {
 				return fmt.Errorf("error preparing %v resource %v: %v", o.GetObjectKind().GroupVersionKind().Kind, metadata.GetName(), err)
 			}
+		default:
+			// if namespace has resource transformation spec
+			if ns, found := resPatch[metadata.GetNamespace()]; found {
+				// if transformspec present for current resource kind
+				if kind, ok := ns[resource.Kind]; ok {
+					err := resourcecollector.TransformResources(o, kind, metadata.GetName(), metadata.GetNamespace())
+					if err != nil {
+						return fmt.Errorf("error updating %v resource %v: %v", o.GetObjectKind().GroupVersionKind().Kind, metadata.GetName(), err)
+					}
+				}
+			}
+			// do nothing
 		}
 
 		// prepare CR resources
@@ -1034,7 +1080,6 @@ func (m *MigrationController) prepareResources(
 				}
 			}
 		}
-
 	}
 	return nil
 }
@@ -1242,6 +1287,7 @@ func (m *MigrationController) prepareApplicationResource(
 	migration *stork_api.Migration,
 	clusterPair *stork_api.ClusterPair,
 	object runtime.Unstructured,
+	resPatch []stork_api.TransformResourceInfo,
 ) error {
 	content := object.UnstructuredContent()
 	if clusterPair.Spec.PlatformOptions.Rancher != nil &&

From c26a11732b622aaac745db5222a40cfc2191891c Mon Sep 17 00:00:00 2001
From: Ram
Date: Thu, 18 Aug 2022 19:56:24 +0530
Subject: [PATCH 12/97] Add transformation rule handler in resourcecollector

- allow dry run for keypair and slice value type

Signed-off-by: Ram
---
 .../controllers/resourcetransformation.go     |  37 +++--
 .../resourcetransformation.go                 | 140 ++++++++++++++++++
 2 files changed, 162 insertions(+), 15 deletions(-)
 create mode 100644 pkg/resourcecollector/resourcetransformation.go

diff --git a/pkg/migration/controllers/resourcetransformation.go b/pkg/migration/controllers/resourcetransformation.go
index 353df8ebd1..d2c0d8b4c0 100644
--- a/pkg/migration/controllers/resourcetransformation.go
+++ b/pkg/migration/controllers/resourcetransformation.go
@@ -139,6 +139,10 @@ func (r *ResourceTransformationController) handle(ctx context.Context, transform
 			}
 			return nil
 		}
+		transform.Status.Status = stork_api.ResourceTransformationStatusInProgress
+		if err = r.client.Update(ctx, transform); err != nil {
+			return err
+		}
 	case stork_api.ResourceTransformationStatusInProgress:
 		err = r.validateTransformResource(ctx, transform)
 		if err != nil {
@@ -179,7 +183,8 @@ func (r *ResourceTransformationController) validateSpecPath(transform *stork_api
 			if path.Operation == stork_api.JsonResourcePatch {
 				return fmt.Errorf("json patch for resources is not supported, operation: %s", path.Operation)
 			}
-			if !(path.Operation == stork_api.AddResourcePath || path.Operation == stork_api.DeleteResourcePath) {
+			if !(path.Operation == stork_api.AddResourcePath || path.Operation == stork_api.DeleteResourcePath ||
path.Operation == stork_api.ModifyResourcePathValue) { return fmt.Errorf("unsupported resource patch operation given for kind :%s, operation: %s", kind, path.Operation) } if !(path.Type == stork_api.BoolResourceType || path.Type == stork_api.IntResourceType || @@ -189,6 +194,7 @@ func (r *ResourceTransformationController) validateSpecPath(transform *stork_api } } } + log.TransformLog(transform).Infof("validated paths ") return nil } @@ -224,12 +230,22 @@ func (r *ResourceTransformationController) validateTransformResource(ctx context log.TransformLog(transform).Errorf("Error getting resources kind:%s, err: %v", kind, err) return err } + // TODO: we can pass in remote config and dry run on remote cluster as well + localconfig, err := clientcmd.BuildConfigFromFlags("", "") + if err != nil { + return err + } + localInterface, err := dynamic.NewForConfig(localconfig) + if err != nil { + return err + } for _, path := range spec.Paths { - if !(path.Operation == stork_api.AddResourcePath || path.Operation == stork_api.DeleteResourcePath) { + // This can be handle by CRD validation- v1 version crd support + if !(path.Operation == stork_api.AddResourcePath || path.Operation == stork_api.DeleteResourcePath || + path.Operation == stork_api.ModifyResourcePathValue) { return fmt.Errorf("unsupported operation type for given path : %s", path.Operation) } for _, object := range objects.Items { - content := object.UnstructuredContent() metadata, err := meta.Accessor(object) if err != nil { log.TransformLog(transform).Errorf("Unable to read metadata for resource %v, err: %v", kind, err) @@ -239,9 +255,9 @@ func (r *ResourceTransformationController) validateTransformResource(ctx context Name: metadata.GetName(), Namespace: metadata.GetNamespace(), GroupVersionKind: metav1.GroupVersionKind(object.GetObjectKind().GroupVersionKind()), + Specs: spec, } - err = unstructured.SetNestedField(content, path.Value, strings.Split(path.Path, ".")...) 
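The validation path above, and the resourcecollector.TransformResources helper it now delegates to, is built on apimachinery's unstructured accessors, which address a field by a dotted path split into a field chain. A minimal standalone sketch of that mechanism follows; the object and path are made up for illustration, and values handed to SetNestedField must be JSON-compatible types (int64 rather than int):

package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	obj := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "apps/v1",
		"kind":       "Deployment",
		"metadata":   map[string]interface{}{"name": "demo", "namespace": "web"},
	}}
	content := obj.UnstructuredContent()
	// Dotted path -> field chain, the same way the transformation paths are split.
	path := "spec.replicas"
	// Values must be JSON-compatible (int64 rather than int) for the unstructured helpers.
	if err := unstructured.SetNestedField(content, int64(3), strings.Split(path, ".")...); err != nil {
		fmt.Println("patch failed:", err)
		return
	}
	obj.SetUnstructuredContent(content)
	fmt.Println(obj.Object["spec"]) // map[replicas:3]
}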
- if err != nil { + if err := resourcecollector.TransformResources(object, []stork_api.TransformResourceInfo{*resInfo}, metadata.GetName(), metadata.GetNamespace()); err != nil { log.TransformLog(transform).Errorf("Unable to apply patch path %s on resource kind: %s/,%s/%s, err: %v", path, kind, resInfo.Namespace, resInfo.Name, err) resInfo.Status = stork_api.ResourceTransformationStatusFailed resInfo.Reason = err.Error() @@ -250,21 +266,12 @@ func (r *ResourceTransformationController) validateTransformResource(ctx context if !ok { return fmt.Errorf("unable to cast object to unstructured: %v", object) } - // TODO: we can pass in remote config and dry run on remote cluster as well - localconfig, err := clientcmd.BuildConfigFromFlags("", "") - if err != nil { - return err - } - localInterface, err := dynamic.NewForConfig(localconfig) - if err != nil { - return err - } resource := &metav1.APIResource{ Name: inflect.Pluralize(strings.ToLower(kind)), Namespaced: len(metadata.GetNamespace()) > 0, } dynamicClient := localInterface.Resource( - object.GetObjectKind().GroupVersionKind().GroupVersion().WithResource(resource.Name)).Namespace(metadata.GetNamespace()) + object.GetObjectKind().GroupVersionKind().GroupVersion().WithResource(resource.Name)).Namespace(getTransformNamespace(transform.Namespace)) unstructured.SetNamespace(getTransformNamespace(transform.Namespace)) log.TransformLog(transform).Infof("Applying %v %v", object.GetObjectKind(), metadata.GetName()) diff --git a/pkg/resourcecollector/resourcetransformation.go b/pkg/resourcecollector/resourcetransformation.go new file mode 100644 index 0000000000..af736aead5 --- /dev/null +++ b/pkg/resourcecollector/resourcetransformation.go @@ -0,0 +1,140 @@ +package resourcecollector + +import ( + "fmt" + "strings" + + stork_api "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" + storkops "github.com/portworx/sched-ops/k8s/stork" + "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +// Since we collect all resources from required migration namespace at once +// getResourcePatch creates map of namespace: {kind: []resourceinfo{}} +// to get transform spec for matching resources +func GetResourcePatch(transformName string, namespaces []string) (map[string]stork_api.KindResourceTransform, error) { + // namespace- Kind:TransformSpec map for faster lookup + patch := make(map[string]stork_api.KindResourceTransform) + if transformName == "" { + logrus.Infof("empty name ") + return patch, nil + } + for _, namespace := range namespaces { + resp, err := storkops.Instance().GetResourceTransformation(transformName, namespace) + if err != nil && !errors.IsNotFound(err) { + logrus.Infof("not found in namespace: %v", transformName) + return nil, err + } + resMap := make(map[string][]stork_api.TransformResourceInfo) + for _, resource := range resp.Status.Resources { + resMap[resource.Kind] = append(resMap[resource.Group], *resource) + } + patch[namespace] = resMap + } + logrus.Infof("resource patch : %v", patch) + return patch, nil +} + +// this method transform object as per resource transformation specified in each namespaces +func TransformResources( + object runtime.Unstructured, + resPatch []stork_api.TransformResourceInfo, + name, namespace string, +) error { + for _, patch := range resPatch { + if patch.Name == name && patch.Namespace == namespace { + content := object.UnstructuredContent() + for _, path := range patch.Specs.Paths { + switch 
path.Operation { + case stork_api.AddResourcePath: + value := getNewValueForPath(path.Value, string(path.Type)) + if path.Type == stork_api.KeyPairResourceType { + updateMap := value.(map[string]string) + err := unstructured.SetNestedStringMap(content, updateMap, strings.Split(path.Path, ".")...) + if err != nil { + logrus.Errorf("Unable to apply patch path %s on resource kind: %s/,%s/%s, err: %v", path, patch.Kind, patch.Namespace, patch.Name, err) + return err + } + } else if path.Type == stork_api.SliceResourceType { + err := unstructured.SetNestedField(content, value, strings.Split(path.Path, ".")...) + if err != nil { + logrus.Errorf("Unable to apply patch path %s on resource kind: %s/,%s/%s, err: %v", path, patch.Kind, patch.Namespace, patch.Name, err) + return err + } + } else { + err := unstructured.SetNestedField(content, value, strings.Split(path.Path, ".")...) + if err != nil { + logrus.Errorf("Unable to apply patch path %s on resource kind: %s/,%s/%s, err: %v", path, patch.Kind, patch.Namespace, patch.Name, err) + return err + } + } + + case stork_api.DeleteResourcePath: + unstructured.RemoveNestedField(content, strings.Split(path.Path, ".")...) + logrus.Debugf("Removed patch path %s on resource kind: %s/,%s/%s", path, patch.Kind, patch.Namespace, patch.Name) + + case stork_api.ModifyResourcePathValue: + var value interface{} + if path.Type == stork_api.KeyPairResourceType { + currMap, _, err := unstructured.NestedMap(content, strings.Split(path.Path, ".")...) + if err != nil { + return fmt.Errorf("unable to find suspend path, err: %v", err) + } + mapList := strings.Split(path.Value, ",") + for _, val := range mapList { + keyPair := strings.Split(val, ":") + currMap[keyPair[0]] = keyPair[1] + } + value = currMap + } else if path.Type == stork_api.SliceResourceType { + currList, _, err := unstructured.NestedSlice(content, strings.Split(path.Path, ".")...) + if err != nil { + return fmt.Errorf("unable to find suspend path, err: %v", err) + } + arrList := strings.Split(path.Value, ",") + for _, val := range arrList { + currList = append(currList, val) + } + value = currList + } else { + value = path.Value + } + err := unstructured.SetNestedField(content, value, strings.Split(path.Path, ".")...) + if err != nil { + logrus.Errorf("Unable to apply patch path %s on resource kind: %s/,%s/%s, err: %v", path, patch.Kind, patch.Namespace, patch.Name, err) + return err + } + } + } + object.SetUnstructuredContent(content) + logrus.Infof("updated resource of kind %v with patch , resource: %v", patch.Kind, object) + } + } + return nil +} + +func getNewValueForPath(oldVal, valType string) interface{} { + var updatedValue interface{} + if valType == string(stork_api.KeyPairResourceType) { + newVal := make(map[string]string) + mapList := strings.Split(oldVal, ",") + for _, val := range mapList { + keyPair := strings.Split(val, ":") + newVal[keyPair[0]] = keyPair[1] + } + updatedValue = newVal + logrus.Infof("map updated : %v", updatedValue, mapList) + } else if valType == string(stork_api.SliceResourceType) { + newVal := []string{} + arrList := strings.Split(oldVal, ",") + newVal = append(newVal, arrList...) 
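getNewValueForPath above leans on two string conventions for the patch value: "k1:v1,k2:v2" for KeyPairResourceType and "a,b,c" for SliceResourceType. A small self-contained sketch of the key-pair convention; unlike the code above it uses SplitN plus a length check, so a malformed pair is skipped instead of being indexed past the split result. (For writing these shapes back, apimachinery also provides SetNestedStringMap and SetNestedStringSlice.)

package main

import (
	"fmt"
	"strings"
)

// parseKeyPairs mirrors the "k1:v1,k2:v2" convention used for
// KeyPairResourceType values; sketch only, hardened against malformed pairs.
func parseKeyPairs(raw string) map[string]string {
	out := map[string]string{}
	for _, kv := range strings.Split(raw, ",") {
		pair := strings.SplitN(kv, ":", 2)
		if len(pair) == 2 {
			out[pair[0]] = pair[1]
		}
	}
	return out
}

func main() {
	fmt.Println(parseKeyPairs("tier:gold,region:us-east")) // map[region:us-east tier:gold]
	fmt.Println(strings.Split("10.0.0.1,10.0.0.2", ","))   // the SliceResourceType convention
}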
+ updatedValue = newVal + logrus.Infof("map updated : %v", updatedValue, arrList) + } else { + updatedValue = oldVal + } + return updatedValue +} From 65e7637f21fccbec393384cc8c5b0e936be6fdf4 Mon Sep 17 00:00:00 2001 From: Ram Date: Wed, 24 Aug 2022 19:32:48 +0530 Subject: [PATCH 13/97] Allow enable/disable resource transformation controller - addressed review comments Signed-off-by: Ram --- cmd/stork/stork.go | 12 ++++++++++++ pkg/migration/controllers/migration.go | 3 +-- pkg/migration/migration.go | 3 +-- pkg/resourcecollector/resourcetransformation.go | 9 ++++----- 4 files changed, 18 insertions(+), 9 deletions(-) diff --git a/cmd/stork/stork.go b/cmd/stork/stork.go index 0c2457427f..f3d27bc075 100644 --- a/cmd/stork/stork.go +++ b/cmd/stork/stork.go @@ -29,6 +29,7 @@ import ( "github.com/libopenstorage/stork/pkg/k8sutils" "github.com/libopenstorage/stork/pkg/metrics" "github.com/libopenstorage/stork/pkg/migration" + "github.com/libopenstorage/stork/pkg/migration/controllers" "github.com/libopenstorage/stork/pkg/monitor" "github.com/libopenstorage/stork/pkg/objectcontroller" "github.com/libopenstorage/stork/pkg/pvcwatcher" @@ -201,6 +202,10 @@ func main() { Value: 4, Usage: "Max threads for apply resources during migration (default: 4)", }, + cli.BoolTFlag{ + Name: controllers.ResourceTransformationControllerName, + Usage: "Start the resource transformation controller (default: true)", + }, } if err := app.Run(os.Args); err != nil { @@ -468,6 +473,13 @@ func runStork(mgr manager.Manager, d volume.Driver, recorder record.EventRecorde } } + if c.Bool(controllers.ResourceTransformationControllerName) { + rt := controllers.NewResourceTransformation(mgr, d, recorder, resourceCollector) + if err := rt.Init(mgr); err != nil { + log.Fatalf("Error initializing resource transformation controller: %v", err) + } + } + if c.Bool("cluster-domain-controllers") { clusterDomains := clusterdomains.ClusterDomains{ Driver: d, diff --git a/pkg/migration/controllers/migration.go b/pkg/migration/controllers/migration.go index ecc821e241..80c1392fea 100644 --- a/pkg/migration/controllers/migration.go +++ b/pkg/migration/controllers/migration.go @@ -1043,7 +1043,7 @@ func (m *MigrationController) prepareResources( return fmt.Errorf("error preparing PV resource %v: %v", metadata.GetName(), err) } case "Deployment", "StatefulSet", "DeploymentConfig", "IBPPeer", "IBPCA", "IBPConsole", "IBPOrderer", "ReplicaSet": - err := m.prepareApplicationResource(migration, clusterPair, o, resPatch[metadata.GetNamespace()][resource.Kind]) + err := m.prepareApplicationResource(migration, clusterPair, o) if err != nil { return fmt.Errorf("error preparing %v resource %v: %v", o.GetObjectKind().GroupVersionKind().Kind, metadata.GetName(), err) } @@ -1287,7 +1287,6 @@ func (m *MigrationController) prepareApplicationResource( migration *stork_api.Migration, clusterPair *stork_api.ClusterPair, object runtime.Unstructured, - resPatch []stork_api.TransformResourceInfo, ) error { content := object.UnstructuredContent() if clusterPair.Spec.PlatformOptions.Rancher != nil && diff --git a/pkg/migration/migration.go b/pkg/migration/migration.go index ddd20826c1..b63adf001d 100644 --- a/pkg/migration/migration.go +++ b/pkg/migration/migration.go @@ -39,6 +39,5 @@ func (m *Migration) Init(mgr manager.Manager, migrationAdminNamespace string, mi if err != nil { return fmt.Errorf("error initializing migration schedule controller: %v", err) } - rt := controllers.NewResourceTransformation(mgr, m.Driver, m.Recorder, m.ResourceCollector) - return 
rt.Init(mgr) + return nil } diff --git a/pkg/resourcecollector/resourcetransformation.go b/pkg/resourcecollector/resourcetransformation.go index af736aead5..0f6d63f8a9 100644 --- a/pkg/resourcecollector/resourcetransformation.go +++ b/pkg/resourcecollector/resourcetransformation.go @@ -19,7 +19,7 @@ func GetResourcePatch(transformName string, namespaces []string) (map[string]sto // namespace- Kind:TransformSpec map for faster lookup patch := make(map[string]stork_api.KindResourceTransform) if transformName == "" { - logrus.Infof("empty name ") + logrus.Error("Empty name received for resource transformation") return patch, nil } for _, namespace := range namespaces { @@ -30,11 +30,10 @@ func GetResourcePatch(transformName string, namespaces []string) (map[string]sto } resMap := make(map[string][]stork_api.TransformResourceInfo) for _, resource := range resp.Status.Resources { - resMap[resource.Kind] = append(resMap[resource.Group], *resource) + resMap[resource.Kind] = append(resMap[resource.Kind], *resource) } patch[namespace] = resMap } - logrus.Infof("resource patch : %v", patch) return patch, nil } @@ -42,10 +41,10 @@ func GetResourcePatch(transformName string, namespaces []string) (map[string]sto func TransformResources( object runtime.Unstructured, resPatch []stork_api.TransformResourceInfo, - name, namespace string, + objName, objNamespace string, ) error { for _, patch := range resPatch { - if patch.Name == name && patch.Namespace == namespace { + if patch.Name == objName && patch.Namespace == objNamespace { content := object.UnstructuredContent() for _, path := range patch.Specs.Paths { switch path.Operation { From 0060ffb184bac0f974ff72cd4dc7d326cadc4ca6 Mon Sep 17 00:00:00 2001 From: Ram Date: Wed, 24 Aug 2022 20:27:04 +0530 Subject: [PATCH 14/97] Dry-run resource transformation validation during migration prechecks - validate transform CR - dry-run on newly detected object before starting migration Signed-off-by: Ram --- pkg/apis/stork/v1alpha1/migration.go | 2 +- pkg/migration/controllers/migration.go | 60 ++++++++++++++++--- .../controllers/resourcetransformation.go | 5 +- 3 files changed, 56 insertions(+), 11 deletions(-) diff --git a/pkg/apis/stork/v1alpha1/migration.go b/pkg/apis/stork/v1alpha1/migration.go index 9067450f13..b8b7313a0d 100644 --- a/pkg/apis/stork/v1alpha1/migration.go +++ b/pkg/apis/stork/v1alpha1/migration.go @@ -27,7 +27,7 @@ type MigrationSpec struct { PostExecRule string `json:"postExecRule"` IncludeOptionalResourceTypes []string `json:"includeOptionalResourceTypes"` SkipDeletedNamespaces *bool `json:"skipDeletedNamespaces"` - UpdateResourceSpecs string `json:"updateResourceSpecs"` + TransformSpecs []string `json:"transformSpecs"` } // MigrationStatus is the status of a migration operation diff --git a/pkg/migration/controllers/migration.go b/pkg/migration/controllers/migration.go index 80c1392fea..fc08ee11af 100644 --- a/pkg/migration/controllers/migration.go +++ b/pkg/migration/controllers/migration.go @@ -372,10 +372,9 @@ func (m *MigrationController) handle(ctx context.Context, migration *stork_api.M } } // Make sure if transformation CR is in ready state - if migration.Spec.UpdateResourceSpecs != "" { - resp, err := storkops.Instance().GetResourceTransformation(migration.Spec.UpdateResourceSpecs, ns) - if err != nil && !errors.IsNotFound(err) { - errMsg := fmt.Sprintf("unable to retrive transformation %s, err: %v", migration.Spec.UpdateResourceSpecs, err) + if len(migration.Spec.TransformSpecs) != 0 { + if len(migration.Spec.TransformSpecs) > 1 { + 
errMsg := fmt.Sprintf("providing multiple transformation specs is not supported in this release %v, err: %v", migration.Spec.TransformSpecs, err) log.MigrationLog(migration).Errorf(errMsg) m.recorder.Event(migration, v1.EventTypeWarning, @@ -387,8 +386,9 @@ func (m *MigrationController) handle(ctx context.Context, migration *stork_api.M } return nil } - if resp != nil && resp.Status.Status != stork_api.ResourceTransformationStatusReady { - errMsg := fmt.Sprintf("transformation %s is not in ready state: %s", migration.Spec.UpdateResourceSpecs, resp.Status.Status) + resp, err := storkops.Instance().GetResourceTransformation(migration.Spec.TransformSpecs[0], ns) + if err != nil && !errors.IsNotFound(err) { + errMsg := fmt.Sprintf("unable to retrive transformation %s, err: %v", migration.Spec.TransformSpecs, err) log.MigrationLog(migration).Errorf(errMsg) m.recorder.Event(migration, v1.EventTypeWarning, @@ -400,6 +400,44 @@ func (m *MigrationController) handle(ctx context.Context, migration *stork_api.M } return nil } + if resp != nil { + // ensure to re-run dry-run for newly introduced object before + // starting migration + resp.Status.Resources = []*stork_api.TransformResourceInfo{} + resp.Status.Status = stork_api.ResourceTransformationStatusInitial + resp.ResourceVersion = "" + transform, err := storkops.Instance().UpdateResourceTransformation(resp) + if err != nil && !errors.IsNotFound(err) { + errMsg := fmt.Sprintf("Error updating transformation CR: %v", err) + log.MigrationLog(migration).Errorf(errMsg) + m.recorder.Event(migration, + v1.EventTypeWarning, + string(stork_api.MigrationStatusFailed), + err.Error()) + err = m.updateMigrationCR(context.Background(), migration) + if err != nil { + if err != nil { + log.MigrationLog(migration).Errorf("Error updating CR, err: %v", err) + } + } + return nil + } + if err := storkops.Instance().ValidateResourceTransformation(transform.Name, ns, 1*time.Minute, 5*time.Second); err != nil { + errMsg := fmt.Sprintf("transformation %s is not in ready state: %s", migration.Spec.TransformSpecs, resp.Status.Status) + log.MigrationLog(migration).Errorf(errMsg) + m.recorder.Event(migration, + v1.EventTypeWarning, + string(stork_api.MigrationStatusFailed), + err.Error()) + err = m.updateMigrationCR(context.Background(), migration) + if err != nil { + if err != nil { + log.MigrationLog(migration).Errorf("Error updating CR, err: %v", err) + } + } + return nil + } + } } } // Make sure the rules exist if configured @@ -1025,10 +1063,16 @@ func (m *MigrationController) prepareResources( if err != nil { return err } - resPatch, err := resourcecollector.GetResourcePatch(migration.Spec.UpdateResourceSpecs, migration.Spec.Namespaces) + transformName := "" + // this is already handled in pre-checks, we dont support multiple resource transformation + // rules specified in migration specs + if len(migration.Spec.TransformSpecs) != 0 && len(migration.Spec.TransformSpecs) == 1 { + transformName = migration.Spec.TransformSpecs[0] + } + resPatch, err := resourcecollector.GetResourcePatch(transformName, migration.Spec.Namespaces) if err != nil { log.MigrationLog(migration). 
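The precheck above resets the CR to the Initial state and then blocks on ValidateResourceTransformation(transform.Name, ns, 1*time.Minute, 5*time.Second). Stripped of the sched-ops plumbing, the contract is a poll loop: succeed on Ready, fail fast on Failed, give up at the timeout. A minimal sketch under those assumptions, where check stands in for fetching transform.Status.Status:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitReady sketches the retry contract: poll until the CR reports Ready,
// fail fast on Failed, and give up once the timeout elapses.
func waitReady(check func() (string, error), timeout, interval time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		status, err := check()
		if err == nil && status == "Ready" {
			return nil
		}
		if status == "Failed" {
			return errors.New("transformation dry-run failed")
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out waiting for Ready, last status %q, err %v", status, err)
		}
		time.Sleep(interval)
	}
}

func main() {
	calls := 0
	err := waitReady(func() (string, error) {
		calls++
		if calls < 3 {
			return "InProgress", nil
		}
		return "Ready", nil
	}, time.Second, 10*time.Millisecond)
	fmt.Println(err) // <nil>
}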
- Warnf("Unable to get transformation spec from :%s, skipping transformation for this migration, err: %v", err) + Warnf("Unable to get transformation spec from :%s, skipping transformation for this migration, err: %v", transformName, err) } for _, o := range objects { metadata, err := meta.Accessor(o) diff --git a/pkg/migration/controllers/resourcetransformation.go b/pkg/migration/controllers/resourcetransformation.go index d2c0d8b4c0..f2624d0ab2 100644 --- a/pkg/migration/controllers/resourcetransformation.go +++ b/pkg/migration/controllers/resourcetransformation.go @@ -32,7 +32,8 @@ import ( ) const ( - resourceTransformationController = "resource-transformation-controller" + // ResourceTransformationControllerName of resource transformation CR handler + ResourceTransformationControllerName = "resource-transformation-controller" ) // NewResourceTransformation creates a new instance of ResourceTransformation Manager @@ -59,7 +60,7 @@ func (r *ResourceTransformationController) Init(mgr manager.Manager) error { return err } - return controllers.RegisterTo(mgr, resourceTransformationController, r, &stork_api.ResourceTransformation{}) + return controllers.RegisterTo(mgr, ResourceTransformationControllerName, r, &stork_api.ResourceTransformation{}) } // Reconcile manages ResourceTransformation resources. From f773ff6f7b09dabe881190fbc72b98f38ddffb00 Mon Sep 17 00:00:00 2001 From: Ram Date: Fri, 26 Aug 2022 12:46:06 +0530 Subject: [PATCH 15/97] Ensure to re-run transformation validation before each migration run Signed-off-by: Ram --- pkg/migration/controllers/migration.go | 84 ++++++++++--------- .../controllers/resourcetransformation.go | 17 ++-- .../resourcetransformation.go | 17 ++-- pkg/storkctl/clusterpair.go | 2 +- 4 files changed, 66 insertions(+), 54 deletions(-) diff --git a/pkg/migration/controllers/migration.go b/pkg/migration/controllers/migration.go index fc08ee11af..a69ed317d9 100644 --- a/pkg/migration/controllers/migration.go +++ b/pkg/migration/controllers/migration.go @@ -373,6 +373,7 @@ func (m *MigrationController) handle(ctx context.Context, migration *stork_api.M } // Make sure if transformation CR is in ready state if len(migration.Spec.TransformSpecs) != 0 { + // Check if multiple transformation specs are provided if len(migration.Spec.TransformSpecs) > 1 { errMsg := fmt.Sprintf("providing multiple transformation specs is not supported in this release %v, err: %v", migration.Spec.TransformSpecs, err) log.MigrationLog(migration).Errorf(errMsg) @@ -386,9 +387,10 @@ func (m *MigrationController) handle(ctx context.Context, migration *stork_api.M } return nil } + // verify if transform specs are created resp, err := storkops.Instance().GetResourceTransformation(migration.Spec.TransformSpecs[0], ns) - if err != nil && !errors.IsNotFound(err) { - errMsg := fmt.Sprintf("unable to retrive transformation %s, err: %v", migration.Spec.TransformSpecs, err) + if err != nil { + errMsg := fmt.Sprintf("unable to retrieve transformation %s, err: %v", migration.Spec.TransformSpecs, err) log.MigrationLog(migration).Errorf(errMsg) m.recorder.Event(migration, v1.EventTypeWarning, @@ -400,43 +402,37 @@ func (m *MigrationController) handle(ctx context.Context, migration *stork_api.M } return nil } - if resp != nil { - // ensure to re-run dry-run for newly introduced object before - // starting migration - resp.Status.Resources = []*stork_api.TransformResourceInfo{} - resp.Status.Status = stork_api.ResourceTransformationStatusInitial - resp.ResourceVersion = "" - transform, err := 
storkops.Instance().UpdateResourceTransformation(resp) - if err != nil && !errors.IsNotFound(err) { - errMsg := fmt.Sprintf("Error updating transformation CR: %v", err) - log.MigrationLog(migration).Errorf(errMsg) - m.recorder.Event(migration, - v1.EventTypeWarning, - string(stork_api.MigrationStatusFailed), - err.Error()) - err = m.updateMigrationCR(context.Background(), migration) - if err != nil { - if err != nil { - log.MigrationLog(migration).Errorf("Error updating CR, err: %v", err) - } - } - return nil + // ensure to re-run dry-run for newly introduced object before + // starting migration + resp.Status.Resources = []*stork_api.TransformResourceInfo{} + resp.Status.Status = stork_api.ResourceTransformationStatusInitial + transform, err := storkops.Instance().UpdateResourceTransformation(resp) + if err != nil && !errors.IsNotFound(err) { + errMsg := fmt.Sprintf("Error updating transformation CR: %v", err) + log.MigrationLog(migration).Errorf(errMsg) + m.recorder.Event(migration, + v1.EventTypeWarning, + string(stork_api.MigrationStatusFailed), + err.Error()) + err = m.updateMigrationCR(context.Background(), migration) + if err != nil { + log.MigrationLog(migration).Errorf("Error updating CR, err: %v", err) } - if err := storkops.Instance().ValidateResourceTransformation(transform.Name, ns, 1*time.Minute, 5*time.Second); err != nil { - errMsg := fmt.Sprintf("transformation %s is not in ready state: %s", migration.Spec.TransformSpecs, resp.Status.Status) - log.MigrationLog(migration).Errorf(errMsg) - m.recorder.Event(migration, - v1.EventTypeWarning, - string(stork_api.MigrationStatusFailed), - err.Error()) - err = m.updateMigrationCR(context.Background(), migration) - if err != nil { - if err != nil { - log.MigrationLog(migration).Errorf("Error updating CR, err: %v", err) - } - } - return nil + return nil + } + // wait for re-run of dry-run resources + if err := storkops.Instance().ValidateResourceTransformation(transform.Name, ns, 1*time.Minute, 5*time.Second); err != nil { + errMsg := fmt.Sprintf("transformation %s is not in ready state: %s", migration.Spec.TransformSpecs, resp.Status.Status) + log.MigrationLog(migration).Errorf(errMsg) + m.recorder.Event(migration, + v1.EventTypeWarning, + string(stork_api.MigrationStatusFailed), + err.Error()) + err = m.updateMigrationCR(context.Background(), migration) + if err != nil { + log.MigrationLog(migration).Errorf("Error updating CR, err: %v", err) } + return nil } } } @@ -1069,11 +1065,17 @@ func (m *MigrationController) prepareResources( if len(migration.Spec.TransformSpecs) != 0 && len(migration.Spec.TransformSpecs) == 1 { transformName = migration.Spec.TransformSpecs[0] } - resPatch, err := resourcecollector.GetResourcePatch(transformName, migration.Spec.Namespaces) - if err != nil { - log.MigrationLog(migration). - Warnf("Unable to get transformation spec from :%s, skipping transformation for this migration, err: %v", transformName, err) + + resPatch := make(map[string]stork_api.KindResourceTransform) + if transformName != "" { + resPatch, err = resourcecollector.GetResourcePatch(transformName, migration.Spec.Namespaces) + if err != nil { + log.MigrationLog(migration). 
+ Warnf("Unable to get transformation spec from :%s, skipping transformation for this migration, err: %v", transformName, err) + return err + } } + for _, o := range objects { metadata, err := meta.Accessor(o) if err != nil { diff --git a/pkg/migration/controllers/resourcetransformation.go b/pkg/migration/controllers/resourcetransformation.go index f2624d0ab2..548aacd948 100644 --- a/pkg/migration/controllers/resourcetransformation.go +++ b/pkg/migration/controllers/resourcetransformation.go @@ -65,8 +65,6 @@ func (r *ResourceTransformationController) Init(mgr manager.Manager) error { // Reconcile manages ResourceTransformation resources. func (r *ResourceTransformationController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - logrus.Infof("Reconciling ResourceTransformation %s/%s", request.Namespace, request.Name) - resourceTransformation := &stork_api.ResourceTransformation{} err := r.client.Get(context.TODO(), request.NamespacedName, resourceTransformation) if err != nil { @@ -111,7 +109,7 @@ func (r *ResourceTransformationController) handle(ctx context.Context, transform ns := &v1.Namespace{} ns.Name = getTransformNamespace(transform.Namespace) _, err := core.Instance().CreateNamespace(ns) - if err != nil { + if err != nil && !errors.IsAlreadyExists(err) { message := fmt.Sprintf("Unable to create resource transformation namespace: %v", err) log.TransformLog(transform).Errorf(message) r.recorder.Event(transform, @@ -213,7 +211,6 @@ func (r *ResourceTransformationController) validateTransformResource(ctx context Namespaced: true, Group: group, } - log.TransformLog(transform).Infof("querying resource: %v", resource) objects, err := r.resourceCollector.GetResourcesForType( resource, nil, @@ -275,7 +272,9 @@ func (r *ResourceTransformationController) validateTransformResource(ctx context object.GetObjectKind().GroupVersionKind().GroupVersion().WithResource(resource.Name)).Namespace(getTransformNamespace(transform.Namespace)) unstructured.SetNamespace(getTransformNamespace(transform.Namespace)) - log.TransformLog(transform).Infof("Applying %v %v", object.GetObjectKind(), metadata.GetName()) + log.TransformLog(transform).Infof("Applying object %s, %s", + object.GetObjectKind().GroupVersionKind().Kind, + metadata.GetName()) _, err = dynamicClient.Create(context.TODO(), unstructured, metav1.CreateOptions{DryRun: []string{"All"}}) if err != nil { log.TransformLog(transform).Errorf("Unable to apply patch path %s on resource kind: %s/,%s/%s, err: %v", path, kind, resInfo.Namespace, resInfo.Name, err) @@ -290,10 +289,18 @@ func (r *ResourceTransformationController) validateTransformResource(ctx context } } } + transform.Status.Status = stork_api.ResourceTransformationStatusReady + // verify if all resource dry-run is successful + for _, resource := range transform.Status.Resources { + if resource.Status != stork_api.ResourceTransformationStatusReady { + transform.Status.Status = stork_api.ResourceTransformationStatusFailed + } + } return r.client.Update(ctx, transform) } +// return group,version,kind from give resource type func getGVK(resource string) (string, string, string, error) { gvk := strings.Split(resource, "/") if len(gvk) != 3 { diff --git a/pkg/resourcecollector/resourcetransformation.go b/pkg/resourcecollector/resourcetransformation.go index 0f6d63f8a9..645245cbb4 100644 --- a/pkg/resourcecollector/resourcetransformation.go +++ b/pkg/resourcecollector/resourcetransformation.go @@ -24,8 +24,13 @@ func GetResourcePatch(transformName string, 
namespaces []string) (map[string]sto } for _, namespace := range namespaces { resp, err := storkops.Instance().GetResourceTransformation(transformName, namespace) - if err != nil && !errors.IsNotFound(err) { - logrus.Infof("not found in namespace: %v", transformName) + if err != nil { + // current namespace does not have any transform CR + // skip it from map + if errors.IsNotFound(err) { + continue + } + logrus.Errorf("Unable to get resource transfomration specs %s/%s, err: %v", namespace, transformName, err) return nil, err } resMap := make(map[string][]stork_api.TransformResourceInfo) @@ -66,7 +71,7 @@ func TransformResources( } else { err := unstructured.SetNestedField(content, value, strings.Split(path.Path, ".")...) if err != nil { - logrus.Errorf("Unable to apply patch path %s on resource kind: %s/,%s/%s, err: %v", path, patch.Kind, patch.Namespace, patch.Name, err) + logrus.Errorf("Unable to perform operation %s on path %s on resource kind: %s/,%s/%s, err: %v", path.Operation, path, patch.Kind, patch.Namespace, patch.Name, err) return err } } @@ -103,13 +108,13 @@ func TransformResources( } err := unstructured.SetNestedField(content, value, strings.Split(path.Path, ".")...) if err != nil { - logrus.Errorf("Unable to apply patch path %s on resource kind: %s/,%s/%s, err: %v", path, patch.Kind, patch.Namespace, patch.Name, err) + logrus.Errorf("Unable to perform operation %s on path %s on resource kind: %s/,%s/%s, err: %v", path.Operation, path, patch.Kind, patch.Namespace, patch.Name, err) return err } } } object.SetUnstructuredContent(content) - logrus.Infof("updated resource of kind %v with patch , resource: %v", patch.Kind, object) + logrus.Infof("Updated resource of kind %v with patch , resource: %v", patch.Kind, object) } } return nil @@ -125,13 +130,11 @@ func getNewValueForPath(oldVal, valType string) interface{} { newVal[keyPair[0]] = keyPair[1] } updatedValue = newVal - logrus.Infof("map updated : %v", updatedValue, mapList) } else if valType == string(stork_api.SliceResourceType) { newVal := []string{} arrList := strings.Split(oldVal, ",") newVal = append(newVal, arrList...) 
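With the change above, GetResourcePatch treats a missing transform CR in a namespace as a soft miss: IsNotFound skips that namespace, while any other error aborts the whole lookup. The same pattern in isolation, where lookup is a hypothetical stand-in for the storkops GetResourceTransformation call:

package main

import (
	"fmt"

	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// lookup is a stand-in for storkops.Instance().GetResourceTransformation;
// it exists only to make the skip-on-NotFound pattern runnable.
func lookup(name, ns string) (string, error) {
	if ns == "empty-ns" {
		gr := schema.GroupResource{Group: "stork.libopenstorage.org", Resource: "resourcetransformations"}
		return "", k8serrors.NewNotFound(gr, name)
	}
	return name + "@" + ns, nil
}

func main() {
	found := map[string]string{}
	for _, ns := range []string{"web", "empty-ns", "db"} {
		spec, err := lookup("rt-demo", ns)
		if err != nil {
			if k8serrors.IsNotFound(err) {
				continue // namespace has no transform CR; leave it out of the map
			}
			fmt.Println("hard failure:", err)
			return
		}
		found[ns] = spec
	}
	fmt.Println(found) // map[db:rt-demo@db web:rt-demo@web]
}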
updatedValue = newVal - logrus.Infof("map updated : %v", updatedValue, arrList) } else { updatedValue = oldVal } diff --git a/pkg/storkctl/clusterpair.go b/pkg/storkctl/clusterpair.go index 1073761e75..90a9f27307 100644 --- a/pkg/storkctl/clusterpair.go +++ b/pkg/storkctl/clusterpair.go @@ -12,8 +12,8 @@ import ( "github.com/libopenstorage/stork/pkg/utils" + clusterclient "github.com/libopenstorage/openstorage/api/client/cluster" storkv1 "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" - "github.com/libopenstorage/stork/pkg/utils" "github.com/portworx/sched-ops/k8s/core" storkops "github.com/portworx/sched-ops/k8s/stork" "github.com/spf13/cobra" From 73589e84080ebfb04b9993ade52a5069d0aea4b3 Mon Sep 17 00:00:00 2001 From: Ram Date: Tue, 30 Aug 2022 09:52:03 +0530 Subject: [PATCH 16/97] Vendor updates - sched-ops Signed-off-by: Ram --- go.mod | 2 +- go.sum | 2 + .../portworx/sched-ops/k8s/core/core.go | 1 + .../portworx/sched-ops/k8s/core/nodes.go | 3 +- .../k8s/stork/resourcetransformation.go | 113 ++++++++++++++++++ .../portworx/sched-ops/k8s/stork/stork.go | 1 + vendor/modules.txt | 4 +- 7 files changed, 122 insertions(+), 4 deletions(-) create mode 100644 vendor/github.com/portworx/sched-ops/k8s/stork/resourcetransformation.go diff --git a/go.mod b/go.mod index eb0141f9ef..f51559ebd3 100644 --- a/go.mod +++ b/go.mod @@ -76,7 +76,7 @@ replace ( github.com/kubernetes-incubator/external-storage v0.20.4-openstorage-rc7 => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc7 github.com/libopenstorage/autopilot-api => github.com/libopenstorage/autopilot-api v0.6.1-0.20210301232050-ca2633c6e114 github.com/libopenstorage/openstorage => github.com/libopenstorage/openstorage v1.0.1-0.20220707215604-afbea03c04c5 - github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20220714042759-8f183fe386ca + github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20220824221759-f21d3c3b4496 github.com/portworx/torpedo => github.com/portworx/torpedo v0.0.0-20220714042817-25f6ab6dc5d1 gopkg.in/fsnotify.v1 v1.4.7 => github.com/fsnotify/fsnotify v1.4.7 helm.sh/helm/v3 => helm.sh/helm/v3 v3.6.0 diff --git a/go.sum b/go.sum index 2d111bbd31..c60bf77143 100644 --- a/go.sum +++ b/go.sum @@ -1426,6 +1426,8 @@ github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987/go.m github.com/portworx/pxc v0.33.0/go.mod h1:Tl7hf4K2CDr0XtxzM08sr9H/KsMhscjf9ydb+MnT0U4= github.com/portworx/sched-ops v1.20.4-rc1.0.20220714042759-8f183fe386ca h1:jrjwiQdqgDRsQZuiRDaWsbvx/z5t1icQPf7dgJOQUKE= github.com/portworx/sched-ops v1.20.4-rc1.0.20220714042759-8f183fe386ca/go.mod h1:0IQvado0rnmbRMORaCqCDrrzjBrX5sU+Sz2+vQwEsjM= +github.com/portworx/sched-ops v1.20.4-rc1.0.20220824221759-f21d3c3b4496 h1:4VuOzgXy6EU6zrVTEP4wlAaBUwdGA2jY1ckyjthTvb8= +github.com/portworx/sched-ops v1.20.4-rc1.0.20220824221759-f21d3c3b4496/go.mod h1:/xDBMzUV30kbdQYaPdAFcAYqEada6ZnWi4zt4KzFzAI= github.com/portworx/talisman v0.0.0-20210302012732-8af4564777f7/go.mod h1:e8a6uFpSbOlRpZQlW9aXYogC+GWAo065G0RL9hDkD4Q= github.com/portworx/torpedo v0.0.0-20220714042817-25f6ab6dc5d1 h1:P4Lo6jDUUKglz7rkqlK8Hg4gLXqIIrgQaEeWxcXrV8U= github.com/portworx/torpedo v0.0.0-20220714042817-25f6ab6dc5d1/go.mod h1:I2wJjwLvCub+L1eNHWyHIIe6SrCreMVgwym4dCsR1WE= diff --git a/vendor/github.com/portworx/sched-ops/k8s/core/core.go b/vendor/github.com/portworx/sched-ops/k8s/core/core.go index 30eb90acf5..72c9cb220d 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/core/core.go +++ 
b/vendor/github.com/portworx/sched-ops/k8s/core/core.go @@ -23,6 +23,7 @@ import ( const ( masterLabelKey = "node-role.kubernetes.io/master" controlplaneLabelKey = "node-role.kubernetes.io/controlplane" + controlDashPlaneLabelKey = "node-role.kubernetes.io/control-plane" pvcStorageProvisionerKey = "volume.beta.kubernetes.io/storage-provisioner" labelUpdateMaxRetries = 5 ) diff --git a/vendor/github.com/portworx/sched-ops/k8s/core/nodes.go b/vendor/github.com/portworx/sched-ops/k8s/core/nodes.go index 55fd5806f7..e266dff2f0 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/core/nodes.go +++ b/vendor/github.com/portworx/sched-ops/k8s/core/nodes.go @@ -138,7 +138,8 @@ func (c *Client) IsNodeMaster(node corev1.Node) bool { // for newer k8s these fields exist but they are empty _, hasMasterLabel := node.Labels[masterLabelKey] _, hasControlPlaneLabel := node.Labels[controlplaneLabelKey] - if hasMasterLabel || hasControlPlaneLabel { + _, hasControlDashPlaneLabel := node.Labels[controlDashPlaneLabelKey] + if hasMasterLabel || hasControlPlaneLabel || hasControlDashPlaneLabel { return true } return false diff --git a/vendor/github.com/portworx/sched-ops/k8s/stork/resourcetransformation.go b/vendor/github.com/portworx/sched-ops/k8s/stork/resourcetransformation.go new file mode 100644 index 0000000000..22ef5bfcb3 --- /dev/null +++ b/vendor/github.com/portworx/sched-ops/k8s/stork/resourcetransformation.go @@ -0,0 +1,113 @@ +package stork + +import ( + "context" + "fmt" + "time" + + storkv1alpha1 "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" + "github.com/portworx/sched-ops/k8s/errors" + "github.com/portworx/sched-ops/task" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ResourceTransformOps is an interface to perform k8s ResourceTransformOps operations +type ResourceTransformOps interface { + // CreateResourceTransformation creates the ResourceTransformation + CreateResourceTransformation(*storkv1alpha1.ResourceTransformation) (*storkv1alpha1.ResourceTransformation, error) + // GetResourceTransformation gets the ResourceTransformation + GetResourceTransformation(string, string) (*storkv1alpha1.ResourceTransformation, error) + // ListResourceTransformations lists all the ResourceTransformations + ListResourceTransformations(namespace string, filterOptions metav1.ListOptions) (*storkv1alpha1.ResourceTransformationList, error) + // UpdateResourceTransformation updates the ResourceTransformation + UpdateResourceTransformation(*storkv1alpha1.ResourceTransformation) (*storkv1alpha1.ResourceTransformation, error) + // DeleteResourceTransformation deletes the ResourceTransformation + DeleteResourceTransformation(string, string) error + // ValidateResourceTransformation validates resource transformation status + ValidateResourceTransformation(string, string, time.Duration, time.Duration) error +} + +// CreateResourceTransformation creates the ResourceTransformation CR +func (c *Client) CreateResourceTransformation(ResourceTransformation *storkv1alpha1.ResourceTransformation) (*storkv1alpha1.ResourceTransformation, error) { + if err := c.initClient(); err != nil { + return nil, err + } + return c.stork.StorkV1alpha1().ResourceTransformations(ResourceTransformation.Namespace).Create(context.TODO(), ResourceTransformation, metav1.CreateOptions{}) +} + +// GetResourceTransformation gets the ResourceTransformation CR +func (c *Client) GetResourceTransformation(name string, namespace string) (*storkv1alpha1.ResourceTransformation, error) { + if err := c.initClient(); err != nil { + return 
nil, err + } + ResourceTransformation, err := c.stork.StorkV1alpha1().ResourceTransformations(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return ResourceTransformation, nil +} + +// ListResourceTransformations lists all the ResourceTransformations CR +func (c *Client) ListResourceTransformations(namespace string, filterOptions metav1.ListOptions) (*storkv1alpha1.ResourceTransformationList, error) { + if err := c.initClient(); err != nil { + return nil, err + } + ResourceTransformations, err := c.stork.StorkV1alpha1().ResourceTransformations(namespace).List(context.TODO(), filterOptions) + if err != nil { + return nil, err + } + return ResourceTransformations, nil +} + +// UpdateResourceTransformation updates the ResourceTransformation CR +func (c *Client) UpdateResourceTransformation(ResourceTransformation *storkv1alpha1.ResourceTransformation) (*storkv1alpha1.ResourceTransformation, error) { + if err := c.initClient(); err != nil { + return nil, err + } + return c.stork.StorkV1alpha1().ResourceTransformations(ResourceTransformation.Namespace).Update(context.TODO(), ResourceTransformation, metav1.UpdateOptions{}) +} + +// DeleteResourceTransformation deletes the ResourceTransformation CR +func (c *Client) DeleteResourceTransformation(name string, namespace string) error { + if err := c.initClient(); err != nil { + return err + } + return c.stork.StorkV1alpha1().ResourceTransformations(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{ + PropagationPolicy: &deleteForegroundPolicy, + }) +} + +// ValidateResourceTransformation validates ResourceTransformation CR status +func (c *Client) ValidateResourceTransformation(name string, namespace string, timeout, retryInterval time.Duration) error { + if err := c.initClient(); err != nil { + return err + } + t := func() (interface{}, bool, error) { + transform, err := c.GetResourceTransformation(name, namespace) + if err != nil { + return "", true, err + } + + if transform.Status.Status == storkv1alpha1.ResourceTransformationStatusReady { + return "", false, nil + } else if transform.Status.Status == storkv1alpha1.ResourceTransformationStatusFailed { + return "", true, &errors.ErrFailedToValidateCustomSpec{ + Name: name, + Cause: fmt.Sprintf("Status: %v \t Resource Spec: %v", transform.Status.Status, transform.Status.Resources), + Type: transform, + } + } + + return "", true, &errors.ErrFailedToValidateCustomSpec{ + Name: name, + Cause: fmt.Sprintf("Status: %v", transform.Status.Status), + Type: transform, + } + } + + if _, err := task.DoRetryWithTimeout(t, timeout, retryInterval); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/portworx/sched-ops/k8s/stork/stork.go b/vendor/github.com/portworx/sched-ops/k8s/stork/stork.go index 61e1127dbb..e2432e4800 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/stork/stork.go +++ b/vendor/github.com/portworx/sched-ops/k8s/stork/stork.go @@ -42,6 +42,7 @@ type Ops interface { ApplicationCloneOps VolumeSnapshotRestoreOps ApplicationRegistrationOps + ResourceTransformOps // SetConfig sets the config and resets the client SetConfig(config *rest.Config) diff --git a/vendor/modules.txt b/vendor/modules.txt index 618e15ae55..ecd2133802 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -731,7 +731,7 @@ github.com/portworx/px-object-controller/client/listers/objectservice/v1alpha1 github.com/portworx/px-object-controller/pkg/client github.com/portworx/px-object-controller/pkg/controller 
github.com/portworx/px-object-controller/pkg/utils -# github.com/portworx/sched-ops v1.20.4-rc1.0.20220401024625-dbc61a336f65 => github.com/portworx/sched-ops v1.20.4-rc1.0.20220714042759-8f183fe386ca +# github.com/portworx/sched-ops v1.20.4-rc1.0.20220401024625-dbc61a336f65 => github.com/portworx/sched-ops v1.20.4-rc1.0.20220824221759-f21d3c3b4496 ## explicit github.com/portworx/sched-ops/k8s/admissionregistration github.com/portworx/sched-ops/k8s/apiextensions @@ -1903,7 +1903,7 @@ sigs.k8s.io/yaml # github.com/kubernetes-incubator/external-storage => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc7 # github.com/libopenstorage/autopilot-api => github.com/libopenstorage/autopilot-api v0.6.1-0.20210301232050-ca2633c6e114 # github.com/libopenstorage/openstorage => github.com/libopenstorage/openstorage v1.0.1-0.20220707215604-afbea03c04c5 -# github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20220714042759-8f183fe386ca +# github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20220824221759-f21d3c3b4496 # github.com/portworx/torpedo => github.com/portworx/torpedo v0.0.0-20220714042817-25f6ab6dc5d1 # gopkg.in/fsnotify.v1 v1.4.7 => github.com/fsnotify/fsnotify v1.4.7 # helm.sh/helm/v3 => helm.sh/helm/v3 v3.6.0 From f49f714af0a269ab925b55ae83e93d950b00ddc6 Mon Sep 17 00:00:00 2001 From: Ram Date: Tue, 30 Aug 2022 10:43:52 +0530 Subject: [PATCH 17/97] codegen for new schema definations Signed-off-by: Ram --- .../stork/v1alpha1/zz_generated.deepcopy.go | 392 +++++++++++++++++- .../fake/fake_resourcetransformation.go | 142 +++++++ .../stork/v1alpha1/fake/fake_stork_client.go | 4 + .../stork/v1alpha1/generated_expansion.go | 2 + .../stork/v1alpha1/resourcetransformation.go | 195 +++++++++ .../typed/stork/v1alpha1/stork_client.go | 5 + .../informers/externalversions/generic.go | 2 + .../stork/v1alpha1/interface.go | 7 + .../stork/v1alpha1/resourcetransformation.go | 90 ++++ .../stork/v1alpha1/expansion_generated.go | 8 + .../stork/v1alpha1/resourcetransformation.go | 99 +++++ 11 files changed, 945 insertions(+), 1 deletion(-) create mode 100644 pkg/client/clientset/versioned/typed/stork/v1alpha1/fake/fake_resourcetransformation.go create mode 100644 pkg/client/clientset/versioned/typed/stork/v1alpha1/resourcetransformation.go create mode 100644 pkg/client/informers/externalversions/stork/v1alpha1/resourcetransformation.go create mode 100644 pkg/client/listers/stork/v1alpha1/resourcetransformation.go diff --git a/pkg/apis/stork/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/stork/v1alpha1/zz_generated.deepcopy.go index c48026b5ff..0f09faac55 100644 --- a/pkg/apis/stork/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/stork/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -524,7 +525,9 @@ func (in *ApplicationRegistration) DeepCopyInto(out *ApplicationRegistration) { if in.Resources != nil { in, out := &in.Resources, &out.Resources *out = make([]ApplicationResource, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } return } @@ -585,6 +588,11 @@ func (in *ApplicationResource) DeepCopyInto(out *ApplicationResource) { *out = *in out.GroupVersionKind = in.GroupVersionKind out.SuspendOptions = in.SuspendOptions + if in.NestedSuspendOptions != nil { + in, out := &in.NestedSuspendOptions, &out.NestedSuspendOptions + *out = make([]SuspendOptions, len(*in)) + copy(*out, *in) + } return } @@ -806,6 +814,7 @@ func (in *BackupLocation) 
DeepCopyInto(out *BackupLocation) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Location.DeepCopyInto(&out.Location) + in.Cluster.DeepCopyInto(&out.Cluster) return } @@ -1081,6 +1090,37 @@ func (in *ClusterDomainsStatusList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterItem) DeepCopyInto(out *ClusterItem) { + *out = *in + if in.AWSClusterConfig != nil { + in, out := &in.AWSClusterConfig, &out.AWSClusterConfig + *out = new(S3Config) + **out = **in + } + if in.AzureClusterConfig != nil { + in, out := &in.AzureClusterConfig, &out.AzureClusterConfig + *out = new(AzureConfig) + **out = **in + } + if in.GCPClusterConfig != nil { + in, out := &in.GCPClusterConfig, &out.GCPClusterConfig + *out = new(GoogleConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterItem. +func (in *ClusterItem) DeepCopy() *ClusterItem { + if in == nil { + return nil + } + out := new(ClusterItem) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterPair) DeepCopyInto(out *ClusterPair) { *out = *in @@ -1153,6 +1193,7 @@ func (in *ClusterPairSpec) DeepCopyInto(out *ClusterPairSpec) { (*out)[key] = val } } + in.PlatformOptions.DeepCopyInto(&out.PlatformOptions) return } @@ -1498,6 +1539,38 @@ func (in *IntervalPolicy) DeepCopy() *IntervalPolicy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in KindResourceTransform) DeepCopyInto(out *KindResourceTransform) { + { + in := &in + *out = make(KindResourceTransform, len(*in)) + for key, val := range *in { + var outVal []TransformResourceInfo + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]TransformResourceInfo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + (*out)[key] = outVal + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KindResourceTransform. +func (in KindResourceTransform) DeepCopy() KindResourceTransform { + if in == nil { + return nil + } + out := new(KindResourceTransform) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
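The generated KindResourceTransform copy above shows why codegen matters for these nested types: copying the map header alone would still share each slice's backing array between the copies. A hand-written equivalent reduced to the shape, where Info is a stand-in for TransformResourceInfo:

package main

import "fmt"

// Info stands in for stork's TransformResourceInfo; only the shape matters here.
type Info struct{ Name string }

// deepCopyKindMap mirrors what the generated KindResourceTransform.DeepCopyInto
// does for a map[string][]Info: every slice is re-allocated and copied
// element-wise so the clone shares no storage with the original.
func deepCopyKindMap(in map[string][]Info) map[string][]Info {
	out := make(map[string][]Info, len(in))
	for kind, infos := range in {
		cp := make([]Info, len(infos))
		copy(cp, infos)
		out[kind] = cp
	}
	return out
}

func main() {
	orig := map[string][]Info{"Deployment": {{Name: "web"}}}
	clone := deepCopyKindMap(orig)
	clone["Deployment"][0].Name = "changed"
	fmt.Println(orig["Deployment"][0].Name) // still "web": no shared backing array
}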
func (in *Migration) DeepCopyInto(out *Migration) { *out = *in @@ -1729,6 +1802,11 @@ func (in *MigrationSpec) DeepCopyInto(out *MigrationSpec) { *out = new(bool) **out = **in } + if in.IncludeNetworkPolicyWithCIDR != nil { + in, out := &in.IncludeNetworkPolicyWithCIDR, &out.IncludeNetworkPolicyWithCIDR + *out = new(bool) + **out = **in + } if in.Selectors != nil { in, out := &in.Selectors, &out.Selectors *out = make(map[string]string, len(*in)) @@ -1741,6 +1819,16 @@ func (in *MigrationSpec) DeepCopyInto(out *MigrationSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.SkipDeletedNamespaces != nil { + in, out := &in.SkipDeletedNamespaces, &out.SkipDeletedNamespaces + *out = new(bool) + **out = **in + } + if in.TransformSpecs != nil { + in, out := &in.TransformSpecs, &out.TransformSpecs + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -1780,6 +1868,13 @@ func (in *MigrationStatus) DeepCopyInto(out *MigrationStatus) { } } in.FinishTimestamp.DeepCopyInto(&out.FinishTimestamp) + in.VolumeMigrationFinishTimestamp.DeepCopyInto(&out.VolumeMigrationFinishTimestamp) + in.ResourceMigrationFinishTimestamp.DeepCopyInto(&out.ResourceMigrationFinishTimestamp) + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(MigrationSummary) + **out = **in + } return } @@ -1793,6 +1888,22 @@ func (in *MigrationStatus) DeepCopy() *MigrationStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MigrationSummary) DeepCopyInto(out *MigrationSummary) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MigrationSummary. +func (in *MigrationSummary) DeepCopy() *MigrationSummary { + if in == nil { + return nil + } + out := new(MigrationSummary) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MigrationTemplateSpec) DeepCopyInto(out *MigrationTemplateSpec) { *out = *in @@ -1945,6 +2056,216 @@ func (in *PVCSelectorSpec) DeepCopy() *PVCSelectorSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchStruct) DeepCopyInto(out *PatchStruct) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make(map[string]TransformResourceInfo, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchStruct. +func (in *PatchStruct) DeepCopy() *PatchStruct { + if in == nil { + return nil + } + out := new(PatchStruct) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlatformSpec) DeepCopyInto(out *PlatformSpec) { + *out = *in + if in.Rancher != nil { + in, out := &in.Rancher, &out.Rancher + *out = new(RancherSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformSpec. +func (in *PlatformSpec) DeepCopy() *PlatformSpec { + if in == nil { + return nil + } + out := new(PlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RancherSecret) DeepCopyInto(out *RancherSecret) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RancherSecret. +func (in *RancherSecret) DeepCopy() *RancherSecret { + if in == nil { + return nil + } + out := new(RancherSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RancherSpec) DeepCopyInto(out *RancherSpec) { + *out = *in + if in.ProjectMappings != nil { + in, out := &in.ProjectMappings, &out.ProjectMappings + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RancherSpec. +func (in *RancherSpec) DeepCopy() *RancherSpec { + if in == nil { + return nil + } + out := new(RancherSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePaths) DeepCopyInto(out *ResourcePaths) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePaths. +func (in *ResourcePaths) DeepCopy() *ResourcePaths { + if in == nil { + return nil + } + out := new(ResourcePaths) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceTransformation) DeepCopyInto(out *ResourceTransformation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceTransformation. +func (in *ResourceTransformation) DeepCopy() *ResourceTransformation { + if in == nil { + return nil + } + out := new(ResourceTransformation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceTransformation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceTransformationList) DeepCopyInto(out *ResourceTransformationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceTransformation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceTransformationList. +func (in *ResourceTransformationList) DeepCopy() *ResourceTransformationList { + if in == nil { + return nil + } + out := new(ResourceTransformationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceTransformationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceTransformationSpec) DeepCopyInto(out *ResourceTransformationSpec) { + *out = *in + if in.Objects != nil { + in, out := &in.Objects, &out.Objects + *out = make([]TransformSpecs, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceTransformationSpec. +func (in *ResourceTransformationSpec) DeepCopy() *ResourceTransformationSpec { + if in == nil { + return nil + } + out := new(ResourceTransformationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceTransformationStatus) DeepCopyInto(out *ResourceTransformationStatus) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]*TransformResourceInfo, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(TransformResourceInfo) + (*in).DeepCopyInto(*out) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceTransformationStatus. +func (in *ResourceTransformationStatus) DeepCopy() *ResourceTransformationStatus { + if in == nil { + return nil + } + out := new(ResourceTransformationStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RestoreVolumeInfo) DeepCopyInto(out *RestoreVolumeInfo) { *out = *in @@ -2253,6 +2574,75 @@ func (in *SuspendOptions) DeepCopy() *SuspendOptions { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransformResourceInfo) DeepCopyInto(out *TransformResourceInfo) { + *out = *in + out.GroupVersionKind = in.GroupVersionKind + in.Specs.DeepCopyInto(&out.Specs) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformResourceInfo. +func (in *TransformResourceInfo) DeepCopy() *TransformResourceInfo { + if in == nil { + return nil + } + out := new(TransformResourceInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransformSpecPatch) DeepCopyInto(out *TransformSpecPatch) { + *out = *in + if in.GVK != nil { + in, out := &in.GVK, &out.GVK + *out = make(map[string]PatchStruct, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformSpecPatch. +func (in *TransformSpecPatch) DeepCopy() *TransformSpecPatch { + if in == nil { + return nil + } + out := new(TransformSpecPatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TransformSpecs) DeepCopyInto(out *TransformSpecs) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]ResourcePaths, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformSpecs. +func (in *TransformSpecs) DeepCopy() *TransformSpecs { + if in == nil { + return nil + } + out := new(TransformSpecs) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeSnapshotRestore) DeepCopyInto(out *VolumeSnapshotRestore) { *out = *in diff --git a/pkg/client/clientset/versioned/typed/stork/v1alpha1/fake/fake_resourcetransformation.go b/pkg/client/clientset/versioned/typed/stork/v1alpha1/fake/fake_resourcetransformation.go new file mode 100644 index 0000000000..397599df33 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/stork/v1alpha1/fake/fake_resourcetransformation.go @@ -0,0 +1,142 @@ +/* +Copyright 2018 Openstorage.org + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1alpha1 "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeResourceTransformations implements ResourceTransformationInterface +type FakeResourceTransformations struct { + Fake *FakeStorkV1alpha1 + ns string +} + +var resourcetransformationsResource = schema.GroupVersionResource{Group: "stork.libopenstorage.org", Version: "v1alpha1", Resource: "resourcetransformations"} + +var resourcetransformationsKind = schema.GroupVersionKind{Group: "stork.libopenstorage.org", Version: "v1alpha1", Kind: "ResourceTransformation"} + +// Get takes name of the resourceTransformation, and returns the corresponding resourceTransformation object, and an error if there is any. +func (c *FakeResourceTransformations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceTransformation, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(resourcetransformationsResource, c.ns, name), &v1alpha1.ResourceTransformation{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ResourceTransformation), err +} + +// List takes label and field selectors, and returns the list of ResourceTransformations that match those selectors. +func (c *FakeResourceTransformations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceTransformationList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(resourcetransformationsResource, resourcetransformationsKind, c.ns, opts), &v1alpha1.ResourceTransformationList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ResourceTransformationList{ListMeta: obj.(*v1alpha1.ResourceTransformationList).ListMeta} + for _, item := range obj.(*v1alpha1.ResourceTransformationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested resourceTransformations. +func (c *FakeResourceTransformations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(resourcetransformationsResource, c.ns, opts)) + +} + +// Create takes the representation of a resourceTransformation and creates it. Returns the server's representation of the resourceTransformation, and an error, if there is any. +func (c *FakeResourceTransformations) Create(ctx context.Context, resourceTransformation *v1alpha1.ResourceTransformation, opts v1.CreateOptions) (result *v1alpha1.ResourceTransformation, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(resourcetransformationsResource, c.ns, resourceTransformation), &v1alpha1.ResourceTransformation{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ResourceTransformation), err +} + +// Update takes the representation of a resourceTransformation and updates it. Returns the server's representation of the resourceTransformation, and an error, if there is any. +func (c *FakeResourceTransformations) Update(ctx context.Context, resourceTransformation *v1alpha1.ResourceTransformation, opts v1.UpdateOptions) (result *v1alpha1.ResourceTransformation, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(resourcetransformationsResource, c.ns, resourceTransformation), &v1alpha1.ResourceTransformation{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ResourceTransformation), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeResourceTransformations) UpdateStatus(ctx context.Context, resourceTransformation *v1alpha1.ResourceTransformation, opts v1.UpdateOptions) (*v1alpha1.ResourceTransformation, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(resourcetransformationsResource, "status", c.ns, resourceTransformation), &v1alpha1.ResourceTransformation{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ResourceTransformation), err +} + +// Delete takes name of the resourceTransformation and deletes it. Returns an error if one occurs. +func (c *FakeResourceTransformations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(resourcetransformationsResource, c.ns, name), &v1alpha1.ResourceTransformation{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeResourceTransformations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(resourcetransformationsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.ResourceTransformationList{}) + return err +} + +// Patch applies the patch and returns the patched resourceTransformation. +func (c *FakeResourceTransformations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceTransformation, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(resourcetransformationsResource, c.ns, name, pt, data, subresources...), &v1alpha1.ResourceTransformation{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ResourceTransformation), err +} diff --git a/pkg/client/clientset/versioned/typed/stork/v1alpha1/fake/fake_stork_client.go b/pkg/client/clientset/versioned/typed/stork/v1alpha1/fake/fake_stork_client.go index 3bf46cb4f9..6944d60715 100644 --- a/pkg/client/clientset/versioned/typed/stork/v1alpha1/fake/fake_stork_client.go +++ b/pkg/client/clientset/versioned/typed/stork/v1alpha1/fake/fake_stork_client.go @@ -84,6 +84,10 @@ func (c *FakeStorkV1alpha1) NamespacedSchedulePolicies(namespace string) v1alpha return &FakeNamespacedSchedulePolicies{c, namespace} } +func (c *FakeStorkV1alpha1) ResourceTransformations(namespace string) v1alpha1.ResourceTransformationInterface { + return &FakeResourceTransformations{c, namespace} +} + func (c *FakeStorkV1alpha1) Rules(namespace string) v1alpha1.RuleInterface { return &FakeRules{c, namespace} } diff --git a/pkg/client/clientset/versioned/typed/stork/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/stork/v1alpha1/generated_expansion.go index 95cc218140..e14b258d54 100644 --- a/pkg/client/clientset/versioned/typed/stork/v1alpha1/generated_expansion.go +++ b/pkg/client/clientset/versioned/typed/stork/v1alpha1/generated_expansion.go @@ -46,6 +46,8 @@ type MigrationScheduleExpansion interface{} type NamespacedSchedulePolicyExpansion interface{} +type ResourceTransformationExpansion interface{} + type RuleExpansion interface{} type SchedulePolicyExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/stork/v1alpha1/resourcetransformation.go b/pkg/client/clientset/versioned/typed/stork/v1alpha1/resourcetransformation.go new file mode 100644 index 0000000000..88c1ee89b2 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/stork/v1alpha1/resourcetransformation.go @@ -0,0 +1,195 @@ +/* +Copyright 2018 Openstorage.org + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" + scheme "github.com/libopenstorage/stork/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ResourceTransformationsGetter has a method to return a ResourceTransformationInterface. +// A group's client should implement this interface. +type ResourceTransformationsGetter interface { + ResourceTransformations(namespace string) ResourceTransformationInterface +} + +// ResourceTransformationInterface has methods to work with ResourceTransformation resources. +type ResourceTransformationInterface interface { + Create(ctx context.Context, resourceTransformation *v1alpha1.ResourceTransformation, opts v1.CreateOptions) (*v1alpha1.ResourceTransformation, error) + Update(ctx context.Context, resourceTransformation *v1alpha1.ResourceTransformation, opts v1.UpdateOptions) (*v1alpha1.ResourceTransformation, error) + UpdateStatus(ctx context.Context, resourceTransformation *v1alpha1.ResourceTransformation, opts v1.UpdateOptions) (*v1alpha1.ResourceTransformation, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ResourceTransformation, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ResourceTransformationList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceTransformation, err error) + ResourceTransformationExpansion +} + +// resourceTransformations implements ResourceTransformationInterface +type resourceTransformations struct { + client rest.Interface + ns string +} + +// newResourceTransformations returns a ResourceTransformations +func newResourceTransformations(c *StorkV1alpha1Client, namespace string) *resourceTransformations { + return &resourceTransformations{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the resourceTransformation, and returns the corresponding resourceTransformation object, and an error if there is any. +func (c *resourceTransformations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceTransformation, err error) { + result = &v1alpha1.ResourceTransformation{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourcetransformations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ResourceTransformations that match those selectors. +func (c *resourceTransformations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceTransformationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ResourceTransformationList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourcetransformations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resourceTransformations. 
+func (c *resourceTransformations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("resourcetransformations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a resourceTransformation and creates it. Returns the server's representation of the resourceTransformation, and an error, if there is any. +func (c *resourceTransformations) Create(ctx context.Context, resourceTransformation *v1alpha1.ResourceTransformation, opts v1.CreateOptions) (result *v1alpha1.ResourceTransformation, err error) { + result = &v1alpha1.ResourceTransformation{} + err = c.client.Post(). + Namespace(c.ns). + Resource("resourcetransformations"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceTransformation). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a resourceTransformation and updates it. Returns the server's representation of the resourceTransformation, and an error, if there is any. +func (c *resourceTransformations) Update(ctx context.Context, resourceTransformation *v1alpha1.ResourceTransformation, opts v1.UpdateOptions) (result *v1alpha1.ResourceTransformation, err error) { + result = &v1alpha1.ResourceTransformation{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourcetransformations"). + Name(resourceTransformation.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceTransformation). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *resourceTransformations) UpdateStatus(ctx context.Context, resourceTransformation *v1alpha1.ResourceTransformation, opts v1.UpdateOptions) (result *v1alpha1.ResourceTransformation, err error) { + result = &v1alpha1.ResourceTransformation{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourcetransformations"). + Name(resourceTransformation.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceTransformation). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the resourceTransformation and deletes it. Returns an error if one occurs. +func (c *resourceTransformations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("resourcetransformations"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *resourceTransformations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("resourcetransformations"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched resourceTransformation. 
+func (c *resourceTransformations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceTransformation, err error) { + result = &v1alpha1.ResourceTransformation{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("resourcetransformations"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/client/clientset/versioned/typed/stork/v1alpha1/stork_client.go b/pkg/client/clientset/versioned/typed/stork/v1alpha1/stork_client.go index 012c2fc77b..4d16ab25e9 100644 --- a/pkg/client/clientset/versioned/typed/stork/v1alpha1/stork_client.go +++ b/pkg/client/clientset/versioned/typed/stork/v1alpha1/stork_client.go @@ -40,6 +40,7 @@ type StorkV1alpha1Interface interface { MigrationsGetter MigrationSchedulesGetter NamespacedSchedulePoliciesGetter + ResourceTransformationsGetter RulesGetter SchedulePoliciesGetter VolumeSnapshotRestoresGetter @@ -107,6 +108,10 @@ func (c *StorkV1alpha1Client) NamespacedSchedulePolicies(namespace string) Names return newNamespacedSchedulePolicies(c, namespace) } +func (c *StorkV1alpha1Client) ResourceTransformations(namespace string) ResourceTransformationInterface { + return newResourceTransformations(c, namespace) +} + func (c *StorkV1alpha1Client) Rules(namespace string) RuleInterface { return newRules(c, namespace) } diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index c444feedbe..b06fee0341 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -81,6 +81,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Stork().V1alpha1().MigrationSchedules().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("namespacedschedulepolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Stork().V1alpha1().NamespacedSchedulePolicies().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("resourcetransformations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Stork().V1alpha1().ResourceTransformations().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("rules"): return &genericInformer{resource: resource.GroupResource(), informer: f.Stork().V1alpha1().Rules().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("schedulepolicies"): diff --git a/pkg/client/informers/externalversions/stork/v1alpha1/interface.go b/pkg/client/informers/externalversions/stork/v1alpha1/interface.go index 10c8fd8791..e093bcc286 100644 --- a/pkg/client/informers/externalversions/stork/v1alpha1/interface.go +++ b/pkg/client/informers/externalversions/stork/v1alpha1/interface.go @@ -52,6 +52,8 @@ type Interface interface { MigrationSchedules() MigrationScheduleInformer // NamespacedSchedulePolicies returns a NamespacedSchedulePolicyInformer. NamespacedSchedulePolicies() NamespacedSchedulePolicyInformer + // ResourceTransformations returns a ResourceTransformationInformer. + ResourceTransformations() ResourceTransformationInformer // Rules returns a RuleInformer. Rules() RuleInformer // SchedulePolicies returns a SchedulePolicyInformer. 
@@ -143,6 +145,11 @@ func (v *version) NamespacedSchedulePolicies() NamespacedSchedulePolicyInformer return &namespacedSchedulePolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } +// ResourceTransformations returns a ResourceTransformationInformer. +func (v *version) ResourceTransformations() ResourceTransformationInformer { + return &resourceTransformationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // Rules returns a RuleInformer. func (v *version) Rules() RuleInformer { return &ruleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/pkg/client/informers/externalversions/stork/v1alpha1/resourcetransformation.go b/pkg/client/informers/externalversions/stork/v1alpha1/resourcetransformation.go new file mode 100644 index 0000000000..d2d52993d5 --- /dev/null +++ b/pkg/client/informers/externalversions/stork/v1alpha1/resourcetransformation.go @@ -0,0 +1,90 @@ +/* +Copyright 2018 Openstorage.org + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + storkv1alpha1 "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" + versioned "github.com/libopenstorage/stork/pkg/client/clientset/versioned" + internalinterfaces "github.com/libopenstorage/stork/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/libopenstorage/stork/pkg/client/listers/stork/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ResourceTransformationInformer provides access to a shared informer and lister for +// ResourceTransformations. +type ResourceTransformationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ResourceTransformationLister +} + +type resourceTransformationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewResourceTransformationInformer constructs a new informer for ResourceTransformation type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewResourceTransformationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredResourceTransformationInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredResourceTransformationInformer constructs a new informer for ResourceTransformation type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredResourceTransformationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorkV1alpha1().ResourceTransformations(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorkV1alpha1().ResourceTransformations(namespace).Watch(context.TODO(), options) + }, + }, + &storkv1alpha1.ResourceTransformation{}, + resyncPeriod, + indexers, + ) +} + +func (f *resourceTransformationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredResourceTransformationInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *resourceTransformationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&storkv1alpha1.ResourceTransformation{}, f.defaultInformer) +} + +func (f *resourceTransformationInformer) Lister() v1alpha1.ResourceTransformationLister { + return v1alpha1.NewResourceTransformationLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/listers/stork/v1alpha1/expansion_generated.go b/pkg/client/listers/stork/v1alpha1/expansion_generated.go index c1d53b74dc..c61fe5420b 100644 --- a/pkg/client/listers/stork/v1alpha1/expansion_generated.go +++ b/pkg/client/listers/stork/v1alpha1/expansion_generated.go @@ -118,6 +118,14 @@ type NamespacedSchedulePolicyListerExpansion interface{} // NamespacedSchedulePolicyNamespaceLister. type NamespacedSchedulePolicyNamespaceListerExpansion interface{} +// ResourceTransformationListerExpansion allows custom methods to be added to +// ResourceTransformationLister. +type ResourceTransformationListerExpansion interface{} + +// ResourceTransformationNamespaceListerExpansion allows custom methods to be added to +// ResourceTransformationNamespaceLister. +type ResourceTransformationNamespaceListerExpansion interface{} + // RuleListerExpansion allows custom methods to be added to // RuleLister. type RuleListerExpansion interface{} diff --git a/pkg/client/listers/stork/v1alpha1/resourcetransformation.go b/pkg/client/listers/stork/v1alpha1/resourcetransformation.go new file mode 100644 index 0000000000..7d7350a30f --- /dev/null +++ b/pkg/client/listers/stork/v1alpha1/resourcetransformation.go @@ -0,0 +1,99 @@ +/* +Copyright 2018 Openstorage.org + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + v1alpha1 "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ResourceTransformationLister helps list ResourceTransformations. +// All objects returned here must be treated as read-only. +type ResourceTransformationLister interface { + // List lists all ResourceTransformations in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.ResourceTransformation, err error) + // ResourceTransformations returns an object that can list and get ResourceTransformations. + ResourceTransformations(namespace string) ResourceTransformationNamespaceLister + ResourceTransformationListerExpansion +} + +// resourceTransformationLister implements the ResourceTransformationLister interface. +type resourceTransformationLister struct { + indexer cache.Indexer +} + +// NewResourceTransformationLister returns a new ResourceTransformationLister. +func NewResourceTransformationLister(indexer cache.Indexer) ResourceTransformationLister { + return &resourceTransformationLister{indexer: indexer} +} + +// List lists all ResourceTransformations in the indexer. +func (s *resourceTransformationLister) List(selector labels.Selector) (ret []*v1alpha1.ResourceTransformation, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ResourceTransformation)) + }) + return ret, err +} + +// ResourceTransformations returns an object that can list and get ResourceTransformations. +func (s *resourceTransformationLister) ResourceTransformations(namespace string) ResourceTransformationNamespaceLister { + return resourceTransformationNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ResourceTransformationNamespaceLister helps list and get ResourceTransformations. +// All objects returned here must be treated as read-only. +type ResourceTransformationNamespaceLister interface { + // List lists all ResourceTransformations in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.ResourceTransformation, err error) + // Get retrieves the ResourceTransformation from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.ResourceTransformation, error) + ResourceTransformationNamespaceListerExpansion +} + +// resourceTransformationNamespaceLister implements the ResourceTransformationNamespaceLister +// interface. +type resourceTransformationNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ResourceTransformations in the indexer for a given namespace. +func (s resourceTransformationNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.ResourceTransformation, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ResourceTransformation)) + }) + return ret, err +} + +// Get retrieves the ResourceTransformation from the indexer for a given namespace and name. 
+func (s resourceTransformationNamespaceLister) Get(name string) (*v1alpha1.ResourceTransformation, error) {
+	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+	if err != nil {
+		return nil, err
+	}
+	if !exists {
+		return nil, errors.NewNotFound(v1alpha1.Resource("resourcetransformation"), name)
+	}
+	return obj.(*v1alpha1.ResourceTransformation), nil
+}

From bf76c3238207d5f5174a33d2b7452ddf26320c47 Mon Sep 17 00:00:00 2001
From: Ram
Date: Sat, 27 Aug 2022 14:57:05 +0530
Subject: [PATCH 18/97] pwx-26151: skip collecting endpoints for headless service

Signed-off-by: Ram
---
 pkg/resourcecollector/endpoint.go | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/pkg/resourcecollector/endpoint.go b/pkg/resourcecollector/endpoint.go
index c8e9215132..8b936c896d 100644
--- a/pkg/resourcecollector/endpoint.go
+++ b/pkg/resourcecollector/endpoint.go
@@ -7,6 +7,10 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 )

+const (
+	headlessService = "service.kubernetes.io/headless"
+)
+
 func (r *ResourceCollector) endpointsToBeCollected(
 	object runtime.Unstructured,
 ) (bool, error) {
@@ -34,6 +38,13 @@ func (r *ResourceCollector) endpointsToBeCollected(
 		if _, ok := endpoint.Annotations[v1.LastAppliedConfigAnnotation]; !ok {
 			return false, nil
 		}
+	}
+	if endpoint.Labels != nil {
+		// skip collecting endpoints for a headless service
+		// https://kubernetes.io/docs/reference/labels-annotations-taints/#servicekubernetesioheadless
+		if _, ok := endpoint.Labels[headlessService]; ok {
+			return false, nil
+		}
 	}

 	return true, nil

From 2119fe091c9f8b311dda5bfd6489c365a944b0be Mon Sep 17 00:00:00 2001
From: Aditya Dani
Date: Thu, 1 Sep 2022 08:46:53 -0700
Subject: [PATCH 19/97] Update google SDK to version 399

---
 Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Dockerfile b/Dockerfile
index dacc9c82c5..2459c284d7 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -18,7 +18,7 @@ RUN python3 -m pip install awscli && python3 -m pip install rsa --upgrade
 RUN curl -q -o /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-07-26/bin/linux/amd64/aws-iam-authenticator && \
     chmod +x /usr/local/bin/aws-iam-authenticator

-ARG GCLOUD_SDK=google-cloud-sdk-269.0.0-linux-x86_64.tar.gz
+ARG GCLOUD_SDK=google-cloud-sdk-399.0.0-linux-x86_64.tar.gz
 # Remove the test directories
 # Also don't need gsutil
 RUN curl -q -o $GCLOUD_SDK https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/$GCLOUD_SDK && \

From e55507688ec1827b28859324e80a931eac75195d Mon Sep 17 00:00:00 2001
From: Lalatendu Das
Date: Wed, 10 Aug 2022 07:38:04 +0000
Subject: [PATCH 20/97] pb-3002: call the v1 CRD API for k8s 1.22 or later.

Fixed a v1beta1-based getCRD call which will fail on k8s 1.22 or later
because the v1beta1 CRD APIs are removed from k8s 1.22 onwards.
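For illustration, the version gate used by this change has roughly the
following shape (a minimal sketch built from the helpers visible in the
diff below; fetchCRD is a hypothetical wrapper, not part of the patch,
and error handling is simplified):

    // fetchCRD returns the CRD using whichever API version the cluster serves.
    func fetchCRD(crdName string) (runtime.Object, error) {
    	v1Required, err := version.RequiresV1Registration()
    	if err != nil {
    		return nil, err
    	}
    	if v1Required {
    		// k8s 1.22+ removed apiextensions.k8s.io/v1beta1, so use v1.
    		return apiextensions.Instance().GetCRD(crdName, metav1.GetOptions{})
    	}
    	// Older clusters still serve the v1beta1 CRD API.
    	return apiextensions.Instance().GetCRDV1beta1(crdName, metav1.GetOptions{})
    }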
Signed-off-by: Lalatendu Das --- .../controllers/applicationbackup.go | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/pkg/applicationmanager/controllers/applicationbackup.go b/pkg/applicationmanager/controllers/applicationbackup.go index f461edc81f..8ffa98d7bc 100644 --- a/pkg/applicationmanager/controllers/applicationbackup.go +++ b/pkg/applicationmanager/controllers/applicationbackup.go @@ -32,6 +32,7 @@ import ( "github.com/sirupsen/logrus" "gocloud.dev/gcerrors" v1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" k8s_errors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -1018,6 +1019,39 @@ func (a *ApplicationBackupController) uploadCRDResources(backup *stork_api.Appli ruleset.AddPlural("quota", "quotas") ruleset.AddPlural("prometheus", "prometheuses") ruleset.AddPlural("mongodbcommunity", "mongodbcommunity") + v1CrdApiReqrd, err := version.RequiresV1Registration() + if err != nil { + return err + } + if v1CrdApiReqrd { + var crds []*apiextensionsv1.CustomResourceDefinition + for _, crd := range crdList.Items { + for _, v := range crd.Resources { + if _, ok := resKinds[v.Kind]; !ok { + continue + } + crdName := ruleset.Pluralize(strings.ToLower(v.Kind)) + "." + v.Group + res, err := apiextensions.Instance().GetCRD(crdName, metav1.GetOptions{}) + if err != nil { + if k8s_errors.IsNotFound(err) { + continue + } + log.ApplicationBackupLog(backup).Errorf("Unable to get custom resource definition for %s, err: %v", v.Kind, err) + return err + } + crds = append(crds, res) + } + + } + jsonBytes, err := json.MarshalIndent(crds, "", " ") + if err != nil { + return err + } + if err := a.uploadObject(backup, crdObjectName, jsonBytes); err != nil { + return err + } + return nil + } var crds []*apiextensionsv1beta1.CustomResourceDefinition for _, crd := range crdList.Items { for _, v := range crd.Resources { From a687fec12cfa45f1d68bcdcca33c38612b2a1642 Mon Sep 17 00:00:00 2001 From: sivakumar subraani Date: Sat, 6 Aug 2022 13:02:05 +0000 Subject: [PATCH 21/97] pb-3000: Added debug statement in GetObjLockInfo api --- pkg/objectstore/s3/s3.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/pkg/objectstore/s3/s3.go b/pkg/objectstore/s3/s3.go index ca92953a74..31e724ceda 100644 --- a/pkg/objectstore/s3/s3.go +++ b/pkg/objectstore/s3/s3.go @@ -11,6 +11,7 @@ import ( "github.com/libopenstorage/secrets/aws/credentials" stork_api "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" "github.com/libopenstorage/stork/pkg/objectstore/common" + "github.com/sirupsen/logrus" "gocloud.dev/blob" "gocloud.dev/blob/s3blob" ) @@ -90,7 +91,9 @@ func GetObjLockInfo(backupLocation *stork_api.BackupLocation) (*common.ObjLockIn objLockInfo := &common.ObjLockInfo{} out, err := s3.New(sess).GetObjectLockConfiguration(input) if err != nil { + logrus.Warnf("GetObjLockInfo: GetObjectLockConfiguration failed with: %v", err) if awsErr, ok := err.(awserr.Error); ok { + logrus.Warnf("GetObjLockInfo: GetObjectLockConfiguration awsErr.Code %v", awsErr.Code()) // When a Minio server doesn't have object-lock implemented then above API // throws following error codes depending on version it runs for normal buckets // 1. 
"ObjectLockConfigurationNotFoundError" @@ -109,9 +112,11 @@ func GetObjLockInfo(backupLocation *stork_api.BackupLocation) (*common.ObjLockIn return nil, err } if (out != nil) && (out.ObjectLockConfiguration != nil) { + logrus.Warnf("GetObjLockInfo: out.ObjectLockConfiguration.ObjectLockEnabled: %v", aws.StringValue(out.ObjectLockConfiguration.ObjectLockEnabled)) if aws.StringValue(out.ObjectLockConfiguration.ObjectLockEnabled) == "Enabled" { objLockInfo.LockEnabled = true } else { + logrus.Infof("GetObjLockInfo ObjectLockConfiguration is empty: %v", out.ObjectLockConfiguration) // For some of the objectstore like FB and dell ECS, GetObjectLockConfiguration // will return empty objectlockconfiguration instead of nil or error return objLockInfo, nil @@ -123,9 +128,13 @@ func GetObjLockInfo(backupLocation *stork_api.BackupLocation) (*common.ObjLockIn objLockInfo.RetentionPeriodDays = aws.Int64Value(out.ObjectLockConfiguration.Rule.DefaultRetention.Days) } else { //This is an invalid object-lock config, no default-retention but object-loc enabled + logrus.Errorf("GetObjLockInfo: invalid config: object lock is enabled but default retention period is not set on the bucket") objLockInfo.LockEnabled = false return nil, fmt.Errorf("invalid config: object lock is enabled but default retention period is not set on the bucket") } } + // This debug statement will not be executed as both err and out can not be nil at the same time. + // Adding it, just in case, we hit it. + logrus.Infof("GetObjLockInfo: returning objLockInfo: %v - err %v", objLockInfo, err) return objLockInfo, err } From cf6d846a7d28f86b9beca72f7295849067859c98 Mon Sep 17 00:00:00 2001 From: Kesavan Thiruvenkadasamy Date: Thu, 22 Sep 2022 10:34:53 +0530 Subject: [PATCH 22/97] Added changes to fix integration test container build failure Bumped up the go version required for gotestsum package Signed-off-by: Kesavan Thiruvenkadasamy --- test/integration_test/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration_test/Dockerfile b/test/integration_test/Dockerfile index 6d21b84f56..21ec3a9936 100644 --- a/test/integration_test/Dockerfile +++ b/test/integration_test/Dockerfile @@ -1,8 +1,8 @@ -FROM golang:1.15.1 +FROM golang:1.16.1 # Install dependancies RUN apt-get update && \ - /usr/local/go/bin/go get -u gotest.tools/gotestsum + /usr/local/go/bin/go install gotest.tools/gotestsum@latest RUN apt-get update && apt-get install -y python3-pip From 2574558b0719acdc3d80c5c7126a90ac42ae4581 Mon Sep 17 00:00:00 2001 From: sivakumar subraani Date: Wed, 31 Aug 2022 14:36:10 +0000 Subject: [PATCH 23/97] pb-3005: Added fix to include the CRDs even if CR is are present. - With fix, we will include all the CRDs of a group, if one CRDs of a parrticular group had a CR in the given namespace. 
---
 .../controllers/applicationbackup.go | 23 +++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/pkg/applicationmanager/controllers/applicationbackup.go b/pkg/applicationmanager/controllers/applicationbackup.go
index 8ffa98d7bc..6ecd3d5a3d 100644
--- a/pkg/applicationmanager/controllers/applicationbackup.go
+++ b/pkg/applicationmanager/controllers/applicationbackup.go
@@ -1025,11 +1025,23 @@ func (a *ApplicationBackupController) uploadCRDResources(backup *stork_api.Appli
 	}
 	if v1CrdApiReqrd {
 		var crds []*apiextensionsv1.CustomResourceDefinition
+		crdsGroups := make(map[string]bool)
+		// First collect the groups of the CRDs that have a CR
 		for _, crd := range crdList.Items {
 			for _, v := range crd.Resources {
 				if _, ok := resKinds[v.Kind]; !ok {
 					continue
 				}
+				crdsGroups[v.Group] = true
+			}
+
+		}
+		// pick up all the CRDs that belong to a group in the crdsGroups map
+		for _, crd := range crdList.Items {
+			for _, v := range crd.Resources {
+				if _, ok := crdsGroups[v.Group]; !ok {
+					continue
+				}
 				crdName := ruleset.Pluralize(strings.ToLower(v.Kind)) + "." + v.Group
 				res, err := apiextensions.Instance().GetCRD(crdName, metav1.GetOptions{})
 				if err != nil {
@@ -1053,11 +1065,22 @@ func (a *ApplicationBackupController) uploadCRDResources(backup *stork_api.Appli
 		return nil
 	}
 	var crds []*apiextensionsv1beta1.CustomResourceDefinition
+	crdsGroups := make(map[string]bool)
+	// First collect the groups of the CRDs that have a CR
 	for _, crd := range crdList.Items {
 		for _, v := range crd.Resources {
 			if _, ok := resKinds[v.Kind]; !ok {
 				continue
 			}
+			crdsGroups[v.Group] = true
+		}
+	}
+	// pick up all the CRDs that belong to a group in the crdsGroups map
+	for _, crd := range crdList.Items {
+		for _, v := range crd.Resources {
+			if _, ok := crdsGroups[v.Group]; !ok {
+				continue
+			}
 			crdName := ruleset.Pluralize(strings.ToLower(v.Kind)) + "."
+ v.Group res, err := apiextensions.Instance().GetCRDV1beta1(crdName, metav1.GetOptions{}) if err != nil { From e511a314eaa3ac540a77d085d85b3f17bcf305da Mon Sep 17 00:00:00 2001 From: sivakumar subraani Date: Tue, 6 Sep 2022 02:51:34 +0000 Subject: [PATCH 24/97] pb-3025: Added support to take backup of webhook - added prepareMutatingWebHookForApply, prepareValidatingWebHookForApply, validatingWebHookToBeCollected and mutatingWebHookToBeCollected apis --- pkg/resourcecollector/resourcecollector.go | 15 +- pkg/resourcecollector/webhook.go | 194 +++++++++++++++++++++ pkg/version/version.go | 18 ++ 3 files changed, 223 insertions(+), 4 deletions(-) create mode 100644 pkg/resourcecollector/webhook.go diff --git a/pkg/resourcecollector/resourcecollector.go b/pkg/resourcecollector/resourcecollector.go index e8a9369e2c..0a5547e43d 100644 --- a/pkg/resourcecollector/resourcecollector.go +++ b/pkg/resourcecollector/resourcecollector.go @@ -181,7 +181,9 @@ func GetSupportedK8SResources(kind string, optionalResourceTypes []string) bool "LimitRange", "NetworkPolicy", "PodDisruptionBudget", - "Endpoints": + "Endpoints", + "ValidatingWebhookConfiguration", + "MutatingWebhookConfiguration": return true case "Job": return slice.ContainsString(optionalResourceTypes, "job", strings.ToLower) || @@ -524,7 +526,6 @@ func (r *ResourceCollector) objectToBeCollected( } else if !include { return false, nil } - switch objectType.GetKind() { case "Service": return r.serviceToBeCollected(object) @@ -558,6 +559,10 @@ func (r *ResourceCollector) objectToBeCollected( return r.virtualMachineInstanceToBeCollected(object) case "Endpoints": return r.endpointsToBeCollected(object) + case "MutatingWebhookConfiguration": + return r.mutatingWebHookToBeCollected(object, namespace) + case "ValidatingWebhookConfiguration": + return r.validatingWebHookToBeCollected(object, namespace) } return true, nil @@ -766,7 +771,6 @@ func (r *ResourceCollector) PrepareResourceForApply( optionalResourceTypes []string, vInfo []*stork_api.ApplicationRestoreVolumeInfo, ) (bool, error) { - objectType, err := meta.TypeAccessor(object) if err != nil { return false, err @@ -793,7 +797,6 @@ func (r *ResourceCollector) PrepareResourceForApply( // Update the namespace of the object, will be no-op for clustered resources metadata.SetNamespace(val) } - switch objectType.GetKind() { case "Job": if slice.ContainsString(optionalResourceTypes, "job", strings.ToLower) || @@ -809,6 +812,10 @@ func (r *ResourceCollector) PrepareResourceForApply( return false, r.prepareClusterRoleBindingForApply(object, namespaceMappings) case "RoleBinding": return false, r.prepareRoleBindingForApply(object, namespaceMappings) + case "ValidatingWebhookConfiguration": + return false, r.prepareValidatingWebHookForApply(object, namespaceMappings) + case "MutatingWebhookConfiguration": + return false, r.prepareMutatingWebHookForApply(object, namespaceMappings) } return false, nil } diff --git a/pkg/resourcecollector/webhook.go b/pkg/resourcecollector/webhook.go new file mode 100644 index 0000000000..1799f62b69 --- /dev/null +++ b/pkg/resourcecollector/webhook.go @@ -0,0 +1,194 @@ +package resourcecollector + +import ( + "github.com/libopenstorage/stork/pkg/version" + "github.com/sirupsen/logrus" + admissionv1 "k8s.io/api/admissionregistration/v1" + admissionv1beta1 "k8s.io/api/admissionregistration/v1beta1" + "k8s.io/apimachinery/pkg/runtime" +) + +func (r *ResourceCollector) prepareMutatingWebHookForApply( + object runtime.Unstructured, + namespaceMappings map[string]string, +) error 
{
+	ok, err := version.RequiresV1Webhooks()
+	if err != nil {
+		return err
+	}
+	if ok {
+		// v1 version
+		var webhookCfg admissionv1.MutatingWebhookConfiguration
+		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &webhookCfg); err != nil {
+			logrus.Errorf("prepareMutatingWebHookForApply: failed in getting mutating webhook: err %v", err)
+			return err
+		}
+		for _, webhook := range webhookCfg.Webhooks {
+			if webhook.ClientConfig.Service != nil {
+				if destNamespace, ok := namespaceMappings[webhook.ClientConfig.Service.Namespace]; ok {
+					// update the namespace with destination namespace
+					webhook.ClientConfig.Service.Namespace = destNamespace
+				}
+			}
+		}
+		o, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&webhookCfg)
+		if err != nil {
+			return err
+		}
+		object.SetUnstructuredContent(o)
+		return nil
+	}
+	// v1beta1 version
+	var webhookCfg admissionv1beta1.MutatingWebhookConfiguration
+	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &webhookCfg); err != nil {
+		logrus.Errorf("prepareMutatingWebHookForApply: failed in getting mutating webhook: err %v", err)
+		return err
+	}
+	for _, webhook := range webhookCfg.Webhooks {
+		if webhook.ClientConfig.Service != nil {
+			if destNamespace, ok := namespaceMappings[webhook.ClientConfig.Service.Namespace]; ok {
+				// update the namespace with destination namespace
+				webhook.ClientConfig.Service.Namespace = destNamespace
+			}
+		}
+	}
+	o, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&webhookCfg)
+	if err != nil {
+		return err
+	}
+	object.SetUnstructuredContent(o)
+	return nil
+}
+
+func (r *ResourceCollector) prepareValidatingWebHookForApply(
+	object runtime.Unstructured,
+	namespaceMappings map[string]string,
+) error {
+	ok, err := version.RequiresV1Webhooks()
+	if err != nil {
+		return err
+	}
+	if ok {
+		// v1 version
+		var webhookCfg admissionv1.ValidatingWebhookConfiguration
+		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &webhookCfg); err != nil {
+			logrus.Errorf("prepareValidatingWebHookForApply: failed in getting validating webhook: err %v", err)
+			return err
+		}
+		for _, webhook := range webhookCfg.Webhooks {
+			if webhook.ClientConfig.Service != nil {
+				if destNamespace, ok := namespaceMappings[webhook.ClientConfig.Service.Namespace]; ok {
+
+					// update the namespace with destination namespace
+					webhook.ClientConfig.Service.Namespace = destNamespace
+				}
+			}
+		}
+		o, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&webhookCfg)
+		if err != nil {
+			return err
+		}
+		object.SetUnstructuredContent(o)
+		return nil
+	}
+	// v1beta1 version
+	var webhookCfg admissionv1beta1.ValidatingWebhookConfiguration
+	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &webhookCfg); err != nil {
+		logrus.Errorf("prepareValidatingWebHookForApply: failed in getting validating webhook: err %v", err)
+		return err
+	}
+	for _, webhook := range webhookCfg.Webhooks {
+		if webhook.ClientConfig.Service != nil {
+			if destNamespace, ok := namespaceMappings[webhook.ClientConfig.Service.Namespace]; ok {
+				// update the namespace with destination namespace
+				webhook.ClientConfig.Service.Namespace = destNamespace
+			}
+		}
+	}
+	o, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&webhookCfg)
+	if err != nil {
+		return err
+	}
+	object.SetUnstructuredContent(o)
+	return nil
+}
+
+func (r *ResourceCollector) validatingWebHookToBeCollected(
+	object 
runtime.Unstructured, + namespace string, +) (bool, error) { + ok, err := version.RequiresV1Webhooks() + if err != nil { + return false, err + } + if ok { + // v1 version + var webhookCfg admissionv1.ValidatingWebhookConfiguration + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &webhookCfg); err != nil { + logrus.Errorf("validatingWebHookToBeCollected: failed in getting validating webhook: err %v", err) + return false, err + } + for _, webhook := range webhookCfg.Webhooks { + if webhook.ClientConfig.Service != nil { + if namespace == webhook.ClientConfig.Service.Namespace { + return true, nil + } + } + } + return false, nil + } + // v1beta1 version + var webhookCfg admissionv1beta1.ValidatingWebhookConfiguration + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &webhookCfg); err != nil { + logrus.Errorf("validatingWebHookToBeCollected: failed in getting validating webhook: err %v", err) + return false, err + } + for _, webhook := range webhookCfg.Webhooks { + if webhook.ClientConfig.Service != nil { + if namespace == webhook.ClientConfig.Service.Namespace { + return true, nil + } + } + } + return false, nil +} + +func (r *ResourceCollector) mutatingWebHookToBeCollected( + object runtime.Unstructured, + namespace string, +) (bool, error) { + ok, err := version.RequiresV1Webhooks() + if err != nil { + return false, err + } + if ok { + // v1 version + var webhookCfg admissionv1.MutatingWebhookConfiguration + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &webhookCfg); err != nil { + logrus.Errorf("mutatingWebHookToBeCollected: failed in getting mutating webhook: err %v", err) + return false, err + } + for _, webhook := range webhookCfg.Webhooks { + if webhook.ClientConfig.Service != nil { + if namespace == webhook.ClientConfig.Service.Namespace { + return true, nil + } + } + } + return false, nil + } + // v1beta1 version + var webhookCfg admissionv1beta1.MutatingWebhookConfiguration + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &webhookCfg); err != nil { + logrus.Errorf("mutatingWebHookToBeCollected: failed in getting mutating webhook: err %v", err) + return false, err + } + for _, webhook := range webhookCfg.Webhooks { + if webhook.ClientConfig.Service != nil { + if namespace == webhook.ClientConfig.Service.Namespace { + return true, nil + } + } + } + return false, nil +} diff --git a/pkg/version/version.go b/pkg/version/version.go index 9a9e17a343..bb28106aa6 100644 --- a/pkg/version/version.go +++ b/pkg/version/version.go @@ -24,6 +24,7 @@ var ( const ( k8sMinVersionCSIDriverV1 = "1.22" k8sMinVersionVolumeSnapshotV1 = "1.20" + K8sMinVersionWebhookv1 = "1.22" ) // RequiresV1Registration returns true if crd needs to be registered as apiVersion V1 @@ -43,6 +44,23 @@ func RequiresV1Registration() (bool, error) { return false, nil } +// RequiresV1Webhooks returns true if V1 version of webhook object is needed +func RequiresV1Webhooks() (bool, error) { + clusterK8sVersion, _, err := GetFullVersion() + if err != nil { + return false, err + } + requiredK8sVer, err := version.NewVersion(K8sMinVersionWebhookv1) + if err != nil { + return false, err + + } + if clusterK8sVersion.GreaterThanOrEqual(requiredK8sVer) { + return true, nil + } + return false, nil +} + // RequiresV1CSIdriver returns true if V1 version of CSIdriver APIs need to be called func RequiresV1CSIdriver() (bool, error) { clusterK8sVersion, _, err 
:= GetFullVersion()

From 0ef7c3734ecb059bd05c67bacbdcb759b016b90c Mon Sep 17 00:00:00 2001
From: sivakumar subraani
Date: Wed, 21 Sep 2022 01:33:36 +0000
Subject: [PATCH 25/97] pb-3113: Added error handling for netapp object store when getting the object-lock config.

---
 pkg/objectstore/s3/s3.go | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/pkg/objectstore/s3/s3.go b/pkg/objectstore/s3/s3.go
index 31e724ceda..f2c00d5a04 100644
--- a/pkg/objectstore/s3/s3.go
+++ b/pkg/objectstore/s3/s3.go
@@ -101,9 +101,11 @@ func GetObjLockInfo(backupLocation *stork_api.BackupLocation) (*common.ObjLockIn
 			// Similarly in case of AWS, we need to ignore "NoSuchBucket" so that
 			// px-backup/stork can create the bucket on behalf user when validation flag is not set.
 			// With cloudian objectstore, we saw the error as "ObjectLockConfigurationNotFound"
+			// With Netapp Trident, we saw the error as "NotImplemented"
 			if awsErr.Code() == "ObjectLockConfigurationNotFoundError" ||
 				awsErr.Code() == "MethodNotAllowed" ||
 				awsErr.Code() == "ObjectLockConfigurationNotFound" ||
+				awsErr.Code() == "NotImplemented" ||
 				awsErr.Code() == "NoSuchBucket" {
 				// for a non-objectlocked bucket we needn't throw error
 				return objLockInfo, nil

From c15469b329d79a8a056c062fd1e8890afd28c0ea Mon Sep 17 00:00:00 2001
From: sivakumar subraani
Date: Sun, 2 Oct 2022 14:54:33 +0000
Subject: [PATCH 26/97] pb-3132: Added logic to call the post exec rule after volume backup, if it is a generic backup.

- If the backup volume list has only non-kdmp volumes, trigger the post
  exec rule right after the startbackup API.
- If the backup volume list has only kdmp volumes, or a mix of kdmp and
  non-kdmp volumes, call the post exec rule once the volume backup is
  completed.

---
 .../controllers/applicationbackup.go | 52 ++++++++++++++++++-
 1 file changed, 51 insertions(+), 1 deletion(-)

diff --git a/pkg/applicationmanager/controllers/applicationbackup.go b/pkg/applicationmanager/controllers/applicationbackup.go
index 6ecd3d5a3d..506c5bba45 100644
--- a/pkg/applicationmanager/controllers/applicationbackup.go
+++ b/pkg/applicationmanager/controllers/applicationbackup.go
@@ -66,6 +66,9 @@ const (
 	maxRetry          = 10
 	retrySleep        = 10 * time.Second
 	genericBackupKey  = "BACKUP_TYPE"
+	kdmpDriverOnly    = "kdmp"
+	nonKdmpDriverOnly = "nonkdmp"
+	mixedDriver       = "mixed"
 )

 var (
@@ -662,7 +665,9 @@ func (a *ApplicationBackupController) backupVolumes(backup *stork_api.Applicatio
 	terminationChannels = nil

 	// Run any post exec rules once backup is triggered
-	if backup.Spec.PostExecRule != "" {
+	driverCombo := a.checkVolumeDriverCombination(backup.Status.Volumes)
+	// If the driver combination of volumes is all non-kdmp, call the post exec rule immediately
+	if driverCombo == nonKdmpDriverOnly && backup.Spec.PostExecRule != "" {
 		err = a.runPostExecRule(backup)
 		if err != nil {
 			message := fmt.Sprintf("Error running PostExecRule: %v", err)
@@ -767,6 +772,32 @@ func (a *ApplicationBackupController) backupVolumes(backup *stork_api.Applicatio
 		}
 	}

+	// Run any post exec rules once the volume backup is complete
+	driverCombo := a.checkVolumeDriverCombination(backup.Status.Volumes)
+	// If the driver combination of volumes is only kdmp, or a mix of kdmp and non-kdmp, call the post exec rule
+	// once the volume backup has succeeded.
+ if (driverCombo == kdmpDriverOnly || driverCombo == mixedDriver) && backup.Spec.PostExecRule != "" { + err = a.runPostExecRule(backup) + if err != nil { + message := fmt.Sprintf("Error running PostExecRule: %v", err) + log.ApplicationBackupLog(backup).Errorf(message) + a.recorder.Event(backup, + v1.EventTypeWarning, + string(stork_api.ApplicationBackupStatusFailed), + message) + + backup.Status.Stage = stork_api.ApplicationBackupStageFinal + backup.Status.FinishTimestamp = metav1.Now() + backup.Status.LastUpdateTimestamp = metav1.Now() + backup.Status.Status = stork_api.ApplicationBackupStatusFailed + backup.Status.Reason = message + err = a.client.Update(context.TODO(), backup) + if err != nil { + return err + } + return fmt.Errorf("%v", message) + } + } // If the backup hasn't failed move on to the next stage. if backup.Status.Status != stork_api.ApplicationBackupStatusFailed { backup.Status.Stage = stork_api.ApplicationBackupStageApplications @@ -1468,3 +1499,22 @@ func (a *ApplicationBackupController) cleanupResources( } return nil } + +func (a *ApplicationBackupController) checkVolumeDriverCombination(volumes []*stork_api.ApplicationBackupVolumeInfo) string { + var kdmpCount, totalCount, nonKdmpCount int + totalCount = len(volumes) + for _, vInfo := range volumes { + if vInfo.DriverName == volume.KDMPDriverName { + kdmpCount++ + } else { + nonKdmpCount++ + } + } + + if totalCount == kdmpCount { + return kdmpDriverOnly + } else if totalCount == nonKdmpCount { + return nonKdmpDriverOnly + } + return mixedDriver +} From f7d18209e026b02a54029ecfe142a3baf8632ca3 Mon Sep 17 00:00:00 2001 From: Aditya Dani Date: Mon, 19 Sep 2022 13:53:05 -0700 Subject: [PATCH 27/97] PWX-26900: Set the resource migration finish timestamp once pruning is done --- pkg/migration/controllers/migration.go | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/pkg/migration/controllers/migration.go b/pkg/migration/controllers/migration.go index a69ed317d9..e13ed74b32 100644 --- a/pkg/migration/controllers/migration.go +++ b/pkg/migration/controllers/migration.go @@ -1021,15 +1021,6 @@ func (m *MigrationController) migrateResources(migration *stork_api.Migration, v return err } - migration.Status.ResourceMigrationFinishTimestamp = metav1.Now() - migration.Status.Stage = stork_api.MigrationStageFinal - migration.Status.Status = stork_api.MigrationStatusSuccessful - for _, resource := range migration.Status.Resources { - if resource.Status != stork_api.MigrationStatusSuccessful { - migration.Status.Status = stork_api.MigrationStatusPartialSuccess - break - } - } if *migration.Spec.PurgeDeletedResources { if err := m.purgeMigratedResources(migration, resourceCollectorOpts); err != nil { message := fmt.Sprintf("Error cleaning up resources: %v", err) @@ -1042,6 +1033,16 @@ func (m *MigrationController) migrateResources(migration *stork_api.Migration, v } } + migration.Status.ResourceMigrationFinishTimestamp = metav1.Now() + migration.Status.Stage = stork_api.MigrationStageFinal + migration.Status.Status = stork_api.MigrationStatusSuccessful + for _, resource := range migration.Status.Resources { + if resource.Status != stork_api.MigrationStatusSuccessful { + migration.Status.Status = stork_api.MigrationStatusPartialSuccess + break + } + } + migration.Status.FinishTimestamp = metav1.Now() err = m.updateMigrationCR(context.TODO(), migration) if err != nil { @@ -1572,11 +1573,11 @@ func (m *MigrationController) applyResources( res.ResourceVersion = "" // if crds is applied as v1beta on 
k8s version 1.16+ it will have - // preservedUnkownField set and api version converted to v1 , + // preservedUnknownField set and api version converted to v1 , // which cause issue while applying it on dest cluster, // since we will be applying v1 crds with non-valid schema - // this converts `preserveUnknownFiels`(deprecated) to spec.Versions[*].xPreservedUnknown + // this converts `preserveUnknownFields`(deprecated) to spec.Versions[*].xPreservedUnknown // equivalent var updatedVersions []apiextensionsv1.CustomResourceDefinitionVersion if res.Spec.PreserveUnknownFields { From e5d585654f361acd30c15d8d24a244db094edadb Mon Sep 17 00:00:00 2001 From: Aditya Dani Date: Mon, 19 Sep 2022 22:17:16 -0700 Subject: [PATCH 28/97] Start the controller manager before starting any kubernetes watches --- cmd/stork/stork.go | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/cmd/stork/stork.go b/cmd/stork/stork.go index f3d27bc075..03972c19ca 100644 --- a/cmd/stork/stork.go +++ b/cmd/stork/stork.go @@ -290,6 +290,18 @@ func run(c *cli.Context) { eventBroadcaster.StartRecordingToSink(&core_v1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, api_v1.EventSource{Component: eventComponentName}) + // Create operator-sdk manager that will manage all controllers. + // Setup the controller manager before starting any watches / other controllers + mgr, err := manager.New(config, manager.Options{}) + if err != nil { + log.Fatalf("Setup controller manager: %v", err) + } + + // Setup scheme for all stork resources + if err := apis.AddToScheme(mgr.GetScheme()); err != nil { + log.Fatalf("Setup scheme failed for stork resources: %v", err) + } + var d volume.Driver if driverName != "" { log.Infof("Using driver %v", driverName) @@ -338,16 +350,6 @@ func run(c *cli.Context) { } } } - // Create operator-sdk manager that will manage all controllers. - mgr, err := manager.New(config, manager.Options{}) - if err != nil { - log.Fatalf("Setup controller manager: %v", err) - } - - // Setup scheme for all stork resources - if err := apis.AddToScheme(mgr.GetScheme()); err != nil { - log.Fatalf("Setup scheme failed for stork resources: %v", err) - } runFunc := func(context.Context) { runStork(mgr, d, recorder, c) From 91ff6bba318907a6ea8b73c7f73c54287a767a3b Mon Sep 17 00:00:00 2001 From: sivakumar subraani Date: Sat, 1 Oct 2022 00:13:36 +0000 Subject: [PATCH 29/97] pb-3131: Added load balancer service type as well to reset the nodeport in resourcecollector. --- pkg/resourcecollector/service.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/resourcecollector/service.go b/pkg/resourcecollector/service.go index 8631e88e5b..131d324958 100644 --- a/pkg/resourcecollector/service.go +++ b/pkg/resourcecollector/service.go @@ -63,7 +63,8 @@ func (r *ResourceCollector) updateService( return err } - if service.Spec.Type == v1.ServiceTypeNodePort { + if service.Spec.Type == v1.ServiceTypeNodePort || + service.Spec.Type == v1.ServiceTypeLoadBalancer { for i := range service.Spec.Ports { service.Spec.Ports[i].NodePort = 0 } From a85d9948bfeab06769c528bdd5246881f904324e Mon Sep 17 00:00:00 2001 From: Aditya Dani Date: Mon, 3 Oct 2022 14:58:17 -0700 Subject: [PATCH 30/97] PWX-27136: Do not collect VirtualMachineInstanceMigration CR. 
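VirtualMachineInstanceMigration is the transient KubeVirt CR that tracks an in-flight live migration of a VirtualMachineInstance. Re-creating it on a destination cluster would replay a migration that no longer applies there, so the resource collector now registers a predicate that unconditionally skips the kind. A simplified caller-side sketch of the predicate dispatch (a hypothetical fragment; the real objectToBeCollected takes additional filtering arguments beyond the object itself, as the hunks below show):

    for _, object := range objects {
        collect, err := r.objectToBeCollected(object) // switches on object Kind
        if err != nil {
            return nil, err
        }
        if !collect {
            continue // e.g. VirtualMachineInstanceMigration is never collected
        }
        collected = append(collected, object)
    }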
--- pkg/resourcecollector/resourcecollector.go | 2 ++ pkg/resourcecollector/virtualmachineinstance.go | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/pkg/resourcecollector/resourcecollector.go b/pkg/resourcecollector/resourcecollector.go index 0a5547e43d..4132e0fea7 100644 --- a/pkg/resourcecollector/resourcecollector.go +++ b/pkg/resourcecollector/resourcecollector.go @@ -557,6 +557,8 @@ func (r *ResourceCollector) objectToBeCollected( return r.dataVolumesToBeCollected(object) case "VirtualMachineInstance": return r.virtualMachineInstanceToBeCollected(object) + case "VirtualMachineInstanceMigration": + return r.virtualMachineInstanceMigrationToBeCollected(object) case "Endpoints": return r.endpointsToBeCollected(object) case "MutatingWebhookConfiguration": diff --git a/pkg/resourcecollector/virtualmachineinstance.go b/pkg/resourcecollector/virtualmachineinstance.go index a6ea3bcbf8..701797f557 100644 --- a/pkg/resourcecollector/virtualmachineinstance.go +++ b/pkg/resourcecollector/virtualmachineinstance.go @@ -7,3 +7,9 @@ func (r *ResourceCollector) virtualMachineInstanceToBeCollected( ) (bool, error) { return false, nil } + +func (r *ResourceCollector) virtualMachineInstanceMigrationToBeCollected( + object runtime.Unstructured, +) (bool, error) { + return false, nil +} From 85a7478295dcf37161a8254e7c1637450a1aa678 Mon Sep 17 00:00:00 2001 From: Ram Date: Fri, 23 Sep 2022 14:28:13 +0530 Subject: [PATCH 31/97] PWX-26934: Handle panic for SkipDeletedNamespace flag Signed-off-by: Ram --- pkg/migration/controllers/migration.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/migration/controllers/migration.go b/pkg/migration/controllers/migration.go index e13ed74b32..e27b66c5b0 100644 --- a/pkg/migration/controllers/migration.go +++ b/pkg/migration/controllers/migration.go @@ -198,6 +198,10 @@ func setDefaults(spec stork_api.MigrationSpec) stork_api.MigrationSpec { defaultBool := false spec.IncludeNetworkPolicyWithCIDR = &defaultBool } + if spec.SkipDeletedNamespaces == nil { + defaultBool := true + spec.SkipDeletedNamespaces = &defaultBool + } return spec } From b4250e4b969321f612c001baf6623c98e6a519f3 Mon Sep 17 00:00:00 2001 From: Ram Date: Fri, 30 Sep 2022 14:30:00 +0530 Subject: [PATCH 32/97] Update migration resources with resource transformation events - add event in migration resource state - ensure svc update is not skipped when transform specs is provided Signed-off-by: Ram --- pkg/apis/stork/v1alpha1/migration.go | 1 + pkg/migration/controllers/migration.go | 30 ++------ .../controllers/resourcetransformation.go | 76 ++++++++++++------- pkg/resourcecollector/resourcecollector.go | 3 + .../resourcetransformation.go | 14 ++++ 5 files changed, 75 insertions(+), 49 deletions(-) diff --git a/pkg/apis/stork/v1alpha1/migration.go b/pkg/apis/stork/v1alpha1/migration.go index b8b7313a0d..3e6633d3e9 100644 --- a/pkg/apis/stork/v1alpha1/migration.go +++ b/pkg/apis/stork/v1alpha1/migration.go @@ -50,6 +50,7 @@ type MigrationResourceInfo struct { meta.GroupVersionKind `json:",inline"` Status MigrationStatusType `json:"status"` Reason string `json:"reason"` + TransformedBy string `json:"transformedBy"` } // MigrationSummary provides a short summary on the migration diff --git a/pkg/migration/controllers/migration.go b/pkg/migration/controllers/migration.go index e27b66c5b0..c181c1915d 100644 --- a/pkg/migration/controllers/migration.go +++ b/pkg/migration/controllers/migration.go @@ -384,7 +384,7 @@ func (m *MigrationController) handle(ctx context.Context, migration 
*stork_api.M m.recorder.Event(migration, v1.EventTypeWarning, string(stork_api.MigrationStatusFailed), - err.Error()) + errMsg) err = m.updateMigrationCR(context.Background(), migration) if err != nil { log.MigrationLog(migration).Errorf("Error updating CR, err: %v", err) @@ -406,26 +406,7 @@ func (m *MigrationController) handle(ctx context.Context, migration *stork_api.M } return nil } - // ensure to re-run dry-run for newly introduced object before - // starting migration - resp.Status.Resources = []*stork_api.TransformResourceInfo{} - resp.Status.Status = stork_api.ResourceTransformationStatusInitial - transform, err := storkops.Instance().UpdateResourceTransformation(resp) - if err != nil && !errors.IsNotFound(err) { - errMsg := fmt.Sprintf("Error updating transformation CR: %v", err) - log.MigrationLog(migration).Errorf(errMsg) - m.recorder.Event(migration, - v1.EventTypeWarning, - string(stork_api.MigrationStatusFailed), - err.Error()) - err = m.updateMigrationCR(context.Background(), migration) - if err != nil { - log.MigrationLog(migration).Errorf("Error updating CR, err: %v", err) - } - return nil - } - // wait for re-run of dry-run resources - if err := storkops.Instance().ValidateResourceTransformation(transform.Name, ns, 1*time.Minute, 5*time.Second); err != nil { + if err := storkops.Instance().ValidateResourceTransformation(resp.Name, ns, 1*time.Minute, 5*time.Second); err != nil { errMsg := fmt.Sprintf("transformation %s is not in ready state: %s", migration.Spec.TransformSpecs, resp.Status.Status) log.MigrationLog(migration).Errorf(errMsg) m.recorder.Event(migration, @@ -1152,6 +1133,11 @@ func (m *MigrationController) updateResourceStatus( (resource.Group == gkv.Group || (resource.Group == "core" && gkv.Group == "")) && resource.Version == gkv.Version && resource.Kind == gkv.Kind { + if _, ok := metadata.GetAnnotations()[resourcecollector.TransformedResourceName]; ok { + if len(migration.Spec.TransformSpecs) != 0 && len(migration.Spec.TransformSpecs) == 1 { + resource.TransformedBy = migration.Spec.TransformSpecs[0] + } + } resource.Status = status resource.Reason = reason eventType := v1.EventTypeNormal @@ -2020,7 +2006,7 @@ func (m *MigrationController) applyResources( case "Service": var skipUpdate bool skipUpdate, err = m.checkAndUpdateService(migration, o, objHash) - if err == nil && skipUpdate { + if err == nil && skipUpdate && len(migration.Spec.TransformSpecs) == 0 { break } fallthrough diff --git a/pkg/migration/controllers/resourcetransformation.go b/pkg/migration/controllers/resourcetransformation.go index 548aacd948..a183f60cf8 100644 --- a/pkg/migration/controllers/resourcetransformation.go +++ b/pkg/migration/controllers/resourcetransformation.go @@ -15,7 +15,7 @@ import ( "github.com/libopenstorage/stork/pkg/resourcecollector" "github.com/libopenstorage/stork/pkg/version" "github.com/portworx/sched-ops/k8s/apiextensions" - "github.com/portworx/sched-ops/k8s/core" + coreops "github.com/portworx/sched-ops/k8s/core" "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" @@ -106,23 +106,6 @@ func (r *ResourceTransformationController) handle(ctx context.Context, transform } switch transform.Status.Status { case stork_api.ResourceTransformationStatusInitial: - ns := &v1.Namespace{} - ns.Name = getTransformNamespace(transform.Namespace) - _, err := core.Instance().CreateNamespace(ns) - if err != nil && !errors.IsAlreadyExists(err) { - message := fmt.Sprintf("Unable to create resource 
transformation namespace: %v", err) - log.TransformLog(transform).Errorf(message) - r.recorder.Event(transform, - v1.EventTypeWarning, - string(stork_api.ResourceTransformationStatusFailed), - message) - transform.Status.Status = stork_api.ResourceTransformationStatusFailed - err := r.client.Update(ctx, transform) - if err != nil { - return err - } - return nil - } err = r.validateSpecPath(transform) if err != nil { message := fmt.Sprintf("Unsupported resource for resource transformation found: %v", err) @@ -199,6 +182,43 @@ func (r *ResourceTransformationController) validateSpecPath(transform *stork_api func (r *ResourceTransformationController) validateTransformResource(ctx context.Context, transform *stork_api.ResourceTransformation) error { resourceCollectorOpts := resourcecollector.Options{} + config, err := clientcmd.BuildConfigFromFlags("", "") + if err != nil { + return err + } + localInterface, err := dynamic.NewForConfig(config) + if err != nil { + return err + } + localOps, err := coreops.NewForConfig(config) + if err != nil { + r.recorder.Event(transform, + v1.EventTypeWarning, + string(stork_api.ResourceTransformationStatusFailed), + err.Error()) + return nil + } + + // temp namespace to run dry-run of transformed resource option + remoteTempNamespace := getTransformNamespace(transform.Namespace) + ns := &v1.Namespace{} + ns.Name = remoteTempNamespace + _, err = localOps.CreateNamespace(ns) + if err != nil && !errors.IsAlreadyExists(err) { + message := fmt.Sprintf("Unable to create resource transformation namespace: %v", err) + log.TransformLog(transform).Errorf(message) + r.recorder.Event(transform, + v1.EventTypeWarning, + string(stork_api.ResourceTransformationStatusFailed), + message) + transform.Status.Status = stork_api.ResourceTransformationStatusFailed + err := r.client.Update(ctx, transform) + if err != nil { + return err + } + return nil + } + for _, spec := range transform.Spec.Objects { group, version, kind, err := getGVK(spec.Resource) if err != nil { @@ -228,15 +248,6 @@ func (r *ResourceTransformationController) validateTransformResource(ctx context log.TransformLog(transform).Errorf("Error getting resources kind:%s, err: %v", kind, err) return err } - // TODO: we can pass in remote config and dry run on remote cluster as well - localconfig, err := clientcmd.BuildConfigFromFlags("", "") - if err != nil { - return err - } - localInterface, err := dynamic.NewForConfig(localconfig) - if err != nil { - return err - } for _, path := range spec.Paths { // This can be handle by CRD validation- v1 version crd support if !(path.Operation == stork_api.AddResourcePath || path.Operation == stork_api.DeleteResourcePath || @@ -297,6 +308,17 @@ func (r *ResourceTransformationController) validateTransformResource(ctx context transform.Status.Status = stork_api.ResourceTransformationStatusFailed } } + + if err := localOps.DeleteNamespace(remoteTempNamespace); err != nil { + // log & generate event, but lets not fail resource transformation if + // transformation controller could not delete temp namespace created + message := fmt.Sprintf("Unable to delete resource transformation namespace %s: %v", ns.Name, err) + log.TransformLog(transform).Errorf(message) + r.recorder.Event(transform, + v1.EventTypeWarning, + string(stork_api.ResourceTransformationStatusFailed), + message) + } return r.client.Update(ctx, transform) } diff --git a/pkg/resourcecollector/resourcecollector.go b/pkg/resourcecollector/resourcecollector.go index 4132e0fea7..5c09880a12 100644 --- 
a/pkg/resourcecollector/resourcecollector.go +++ b/pkg/resourcecollector/resourcecollector.go @@ -46,6 +46,9 @@ const ( ProjectMappingsOption = "ProjectMappings" // IncludeResources to not skip resources of specific type IncludeResources = "stork.libopenstorage.org/include-resource" + // TransformedResourceName is the annotation used to check if resource has been updated + // as per transformation rules + TransformedResourceName = "stork.libopenstorage.org/resourcetransformation-name" // ServiceKind for k8s service resources ServiceKind = "Service" diff --git a/pkg/resourcecollector/resourcetransformation.go b/pkg/resourcecollector/resourcetransformation.go index 645245cbb4..bfc1c09e34 100644 --- a/pkg/resourcecollector/resourcetransformation.go +++ b/pkg/resourcecollector/resourcetransformation.go @@ -113,6 +113,20 @@ func TransformResources( } } } + // lets add annotation saying this resource has been transformed by migration/restore + // controller before applying + // set migration annotations + annotations, found, err := unstructured.NestedStringMap(content, "metadata", "annotations") + if err != nil { + return err + } + if !found { + annotations = make(map[string]string) + } + annotations[TransformedResourceName] = "true" + if err := unstructured.SetNestedStringMap(content, annotations, "metadata", "annotations"); err != nil { + return err + } object.SetUnstructuredContent(content) logrus.Infof("Updated resource of kind %v with patch , resource: %v", patch.Kind, object) } From ed7df1ae160399ec16b8657b9f4fdc849dff7a9e Mon Sep 17 00:00:00 2001 From: Ram Date: Thu, 6 Oct 2022 16:29:48 +0530 Subject: [PATCH 33/97] PWX-26882 update k8s libs to 1.21.5 to fix CVEs Signed-off-by: Ram --- go.mod | 73 +++--- go.sum | 58 +++++ .../prometheus/collectors/collectors.go | 16 ++ .../collectors/dbstats_collector.go | 119 +++++++++ .../collectors/dbstats_collector_go115.go | 30 +++ .../collectors/dbstats_collector_pre_go115.go | 26 ++ .../prometheus/collectors/expvar_collector.go | 57 +++++ .../prometheus/collectors/go_collector.go | 69 +++++ .../collectors/process_collector.go | 56 +++++ vendor/golang.org/x/time/rate/rate.go | 20 +- vendor/gopkg.in/yaml.v3/decode.go | 78 ++++-- vendor/gopkg.in/yaml.v3/parserc.go | 11 +- vendor/helm.sh/helm/v3/pkg/action/install.go | 30 ++- vendor/helm.sh/helm/v3/pkg/action/pull.go | 3 +- .../v3/pkg/downloader/chart_downloader.go | 6 +- .../helm.sh/helm/v3/pkg/downloader/manager.go | 12 +- vendor/helm.sh/helm/v3/pkg/getter/getter.go | 7 + .../helm.sh/helm/v3/pkg/getter/httpgetter.go | 21 +- vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go | 12 + .../third_party/forked/golang/LICENSE | 27 ++ .../third_party/forked/golang/PATENTS | 22 ++ .../third_party/forked/golang/LICENSE | 27 ++ .../third_party/forked/golang/PATENTS | 22 ++ vendor/k8s.io/cloud-provider/go.mod | 26 +- vendor/k8s.io/cloud-provider/go.sum | 42 ++-- vendor/k8s.io/code-generator/go.mod | 4 +- vendor/k8s.io/code-generator/go.sum | 65 +---- .../third_party/forked/golang/LICENSE | 27 ++ .../third_party/forked/golang/PATENTS | 22 ++ vendor/k8s.io/klog/v2/go.mod | 2 +- vendor/k8s.io/klog/v2/go.sum | 4 +- vendor/k8s.io/klog/v2/klog.go | 52 ++-- .../pkg/volume/util/subpath/subpath_linux.go | 3 +- vendor/k8s.io/mount-utils/fake_mounter.go | 4 + vendor/k8s.io/mount-utils/mount.go | 2 + vendor/k8s.io/mount-utils/mount_linux.go | 35 ++- .../k8s.io/mount-utils/mount_unsupported.go | 5 + vendor/k8s.io/mount-utils/mount_windows.go | 6 + .../third_party/forked/golang/LICENSE | 27 ++ .../third_party/forked/golang/PATENTS | 
22 ++ .../third_party/forked/golang/net/ip.go | 236 ++++++++++++++++++ .../third_party/forked/golang/net/parse.go | 59 +++++ vendor/k8s.io/utils/net/ipnet.go | 4 +- vendor/k8s.io/utils/net/net.go | 12 +- vendor/k8s.io/utils/net/parse.go | 33 +++ vendor/k8s.io/utils/net/port.go | 2 +- vendor/modules.txt | 99 ++++---- .../controller-runtime/pkg/cache/cache.go | 8 +- .../pkg/cache/informer_cache.go | 11 +- .../pkg/cache/internal/cache_reader.go | 21 +- .../pkg/cache/internal/deleg_map.go | 1 - .../pkg/cache/internal/informers_map.go | 60 +++-- .../pkg/cache/internal/selector.go | 6 +- .../pkg/cache/multi_namespace_cache.go | 25 +- .../pkg/client/apiutil/apimachinery.go | 4 +- .../pkg/client/apiutil/dynamicrestmapper.go | 2 +- .../controller-runtime/pkg/client/client.go | 26 +- .../pkg/client/client_cache.go | 10 +- .../controller-runtime/pkg/client/dryrun.go | 22 +- .../pkg/client/interfaces.go | 2 +- .../pkg/client/metadata_client.go | 10 +- .../pkg/client/namespaced_client.go | 38 +-- .../controller-runtime/pkg/client/options.go | 12 +- .../pkg/client/typed_client.go | 14 +- .../pkg/client/unstructured_client.go | 14 +- .../controller-runtime/pkg/cluster/cluster.go | 10 +- .../controller-runtime/pkg/config/config.go | 16 +- .../pkg/config/v1alpha1/register.go | 4 +- .../pkg/config/v1alpha1/types.go | 12 +- .../pkg/controller/controller.go | 2 +- .../controller-runtime/pkg/handler/enqueue.go | 15 +- .../pkg/handler/enqueue_mapped.go | 8 +- .../pkg/handler/enqueue_owner.go | 15 +- .../pkg/handler/eventhandler.go | 8 +- .../controller-runtime/pkg/healthz/healthz.go | 9 +- .../pkg/internal/controller/controller.go | 34 ++- .../internal/controller/metrics/metrics.go | 15 +- .../pkg/internal/objectutil/objectutil.go | 7 +- .../pkg/leaderelection/leader_election.go | 5 +- .../controller-runtime/pkg/log/deleg.go | 8 +- .../controller-runtime/pkg/log/null.go | 14 +- .../pkg/log/warning_handler.go | 2 +- .../pkg/manager/internal.go | 11 +- .../controller-runtime/pkg/manager/manager.go | 17 +- .../pkg/metrics/client_go_adapter.go | 8 +- .../pkg/metrics/registry.go | 2 +- .../pkg/predicate/predicate.go | 18 +- .../pkg/runtime/inject/inject.go | 14 +- .../pkg/source/internal/eventsource.go | 8 +- .../controller-runtime/pkg/source/source.go | 14 +- .../pkg/webhook/admission/decode.go | 8 +- .../pkg/webhook/admission/defaulter.go | 5 +- .../pkg/webhook/admission/http.go | 6 +- .../pkg/webhook/admission/validator.go | 10 +- .../pkg/webhook/admission/webhook.go | 28 +-- .../controller-runtime/pkg/webhook/alias.go | 4 +- .../controller-runtime/pkg/webhook/server.go | 74 +++++- 97 files changed, 1718 insertions(+), 574 deletions(-) create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_go115.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_pre_go115.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/LICENSE create mode 100644 
vendor/k8s.io/apimachinery/third_party/forked/golang/PATENTS create mode 100644 vendor/k8s.io/client-go/third_party/forked/golang/LICENSE create mode 100644 vendor/k8s.io/client-go/third_party/forked/golang/PATENTS create mode 100644 vendor/k8s.io/code-generator/third_party/forked/golang/LICENSE create mode 100644 vendor/k8s.io/code-generator/third_party/forked/golang/PATENTS create mode 100644 vendor/k8s.io/utils/internal/third_party/forked/golang/LICENSE create mode 100644 vendor/k8s.io/utils/internal/third_party/forked/golang/PATENTS create mode 100644 vendor/k8s.io/utils/internal/third_party/forked/golang/net/ip.go create mode 100644 vendor/k8s.io/utils/internal/third_party/forked/golang/net/parse.go create mode 100644 vendor/k8s.io/utils/net/parse.go diff --git a/go.mod b/go.mod index f51559ebd3..534425a34a 100644 --- a/go.mod +++ b/go.mod @@ -45,18 +45,19 @@ require ( google.golang.org/grpc v1.43.0 google.golang.org/grpc/examples v0.0.0-20220715232852-f601dfac73c9 // indirect gopkg.in/yaml.v2 v2.4.0 + gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.24.0 - k8s.io/apiextensions-apiserver v0.21.4 + k8s.io/apiextensions-apiserver v0.21.5 k8s.io/apimachinery v0.24.3 - k8s.io/apiserver v0.21.4 - k8s.io/cli-runtime v0.21.4 + k8s.io/apiserver v0.21.5 + k8s.io/cli-runtime v0.21.5 k8s.io/client-go v12.0.0+incompatible - k8s.io/code-generator v0.21.4 - k8s.io/component-helpers v0.21.4 + k8s.io/code-generator v0.21.5 + k8s.io/component-helpers v0.21.5 k8s.io/kube-scheduler v0.0.0 - k8s.io/kubectl v0.21.4 - k8s.io/kubernetes v1.21.4 - sigs.k8s.io/controller-runtime v0.9.0 + k8s.io/kubectl v0.21.5 + k8s.io/kubernetes v1.21.5 + sigs.k8s.io/controller-runtime v0.9.7 sigs.k8s.io/gcp-compute-persistent-disk-csi-driver v0.7.0 sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.3.0 ) @@ -79,34 +80,34 @@ replace ( github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20220824221759-f21d3c3b4496 github.com/portworx/torpedo => github.com/portworx/torpedo v0.0.0-20220714042817-25f6ab6dc5d1 gopkg.in/fsnotify.v1 v1.4.7 => github.com/fsnotify/fsnotify v1.4.7 - helm.sh/helm/v3 => helm.sh/helm/v3 v3.6.0 + helm.sh/helm/v3 => helm.sh/helm/v3 v3.6.1 - k8s.io/api => k8s.io/api v0.21.4 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.21.4 - k8s.io/apimachinery => k8s.io/apimachinery v0.21.4 - k8s.io/apiserver => k8s.io/apiserver v0.21.4 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.21.4 - k8s.io/client-go => k8s.io/client-go v0.21.4 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.21.4 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.21.4 - k8s.io/code-generator => k8s.io/code-generator v0.21.4 - k8s.io/component-base => k8s.io/component-base v0.21.4 - k8s.io/component-helpers => k8s.io/component-helpers v0.21.4 - k8s.io/controller-manager => k8s.io/controller-manager v0.21.4 - k8s.io/cri-api => k8s.io/cri-api v0.21.4 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.21.4 - k8s.io/klog/v2 => k8s.io/klog/v2 v2.4.0 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.21.4 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.21.4 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.21.4 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.21.4 - k8s.io/kubectl => k8s.io/kubectl v0.21.4 - k8s.io/kubelet => k8s.io/kubelet v0.21.4 - k8s.io/kubernetes => k8s.io/kubernetes v1.21.4 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.21.4 - k8s.io/metrics => k8s.io/metrics v0.21.4 - k8s.io/mount-utils => k8s.io/mount-utils v0.21.4 
- k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.21.4 - sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.9.0 + k8s.io/api => k8s.io/api v0.21.5 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.21.5 + k8s.io/apimachinery => k8s.io/apimachinery v0.21.5 + k8s.io/apiserver => k8s.io/apiserver v0.21.5 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.21.5 + k8s.io/client-go => k8s.io/client-go v0.21.5 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.21.5 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.21.5 + k8s.io/code-generator => k8s.io/code-generator v0.21.5 + k8s.io/component-base => k8s.io/component-base v0.21.5 + k8s.io/component-helpers => k8s.io/component-helpers v0.21.5 + k8s.io/controller-manager => k8s.io/controller-manager v0.21.5 + k8s.io/cri-api => k8s.io/cri-api v0.21.5 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.21.5 + k8s.io/klog/v2 => k8s.io/klog/v2 v2.8.0 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.21.5 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.21.5 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.21.5 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.21.5 + k8s.io/kubectl => k8s.io/kubectl v0.21.5 + k8s.io/kubelet => k8s.io/kubelet v0.21.5 + k8s.io/kubernetes => k8s.io/kubernetes v1.21.5 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.21.5 + k8s.io/metrics => k8s.io/metrics v0.21.5 + k8s.io/mount-utils => k8s.io/mount-utils v0.21.5 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.21.5 + sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.9.6 sigs.k8s.io/sig-storage-lib-external-provisioner/v6 => sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.3.0 ) diff --git a/go.sum b/go.sum index c60bf77143..d99c6b91ae 100644 --- a/go.sum +++ b/go.sum @@ -266,6 +266,7 @@ github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZw github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/banzaicloud/k8s-objectmatcher v1.5.1/go.mod h1:9MWY5HsM/OaTmoTirczhlO8UALbH722WgdpaaR7Y8OE= github.com/bazelbuild/buildtools v0.0.0-20190917191645-69366ca98f89/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -275,6 +276,7 @@ github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3 github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= @@ -318,6 +320,7 @@ github.com/cilium/ebpf 
v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= @@ -1328,6 +1331,7 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= @@ -1344,6 +1348,7 @@ github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5X github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/opencontainers/runc v1.0.0-rc95/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -1351,6 +1356,7 @@ github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.m github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.10.1 h1:09LIPVRP3uuZGQvgR+SgMSNBd1Eb3vlRbGqQpoHsF8w= github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= @@ -1734,6 +1740,7 @@ go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= gocloud.dev v0.20.0 h1:mbEKMfnyPV7W1Rj35R1xXfjszs9dXkwSOq2KoFr25g8= gocloud.dev v0.20.0/go.mod 
h1:+Y/RpSXrJthIOM8uFNzWp6MRu9pFPNFEEZrQMxpkfIc= golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -2048,6 +2055,8 @@ golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2341,6 +2350,8 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= @@ -2348,6 +2359,8 @@ gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= helm.sh/helm/v3 v3.6.0 h1:/9IMxJ2lXJHbvTMHcW1AO71lXQHqDC+3bcpGp7yCsb8= helm.sh/helm/v3 v3.6.0/go.mod h1:mIIus8EOqj+obtycw3sidsR4ORr2aFDmXMSI3k+oeVY= +helm.sh/helm/v3 v3.6.1 h1:TQ6q4pAatXr7qh2fbLcb0oNd0I3J7kv26oo5cExKTtc= +helm.sh/helm/v3 v3.6.1/go.mod h1:mIIus8EOqj+obtycw3sidsR4ORr2aFDmXMSI3k+oeVY= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2361,28 +2374,52 @@ honnef.co/go/tools v0.1.2/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= k8s.io/api v0.21.4 h1:WtDkzTAuI31WZKDPeIYpEUA+WeUfXAmA7gwj6nzFfbc= k8s.io/api v0.21.4/go.mod h1:fTVGP+M4D8+00FN2cMnJqk/eb/GH53bvmNs2SVTmpFk= +k8s.io/api v0.21.5 h1:9zp3SslPRB+rqxhGKqqTo6VsN3HX0Ype1nWV6UQQ+Sk= +k8s.io/api v0.21.5/go.mod h1:Un8C5Hemo2r3MfPOjZvwQQ9KkBbiTBUCGrjlivo9uJ0= k8s.io/apiextensions-apiserver v0.21.4 h1:HkajN/vmT/9HnFmUxvpXfSGkTCvH/ax4e3+j6mqWUDU= k8s.io/apiextensions-apiserver v0.21.4/go.mod h1:OoC8LhI9LnV+wKjZkXIBbLUwtnOGJiTRE33qctH5CIk= +k8s.io/apiextensions-apiserver v0.21.5 h1:sCUpiB47ba59J57ZsqOvoxD3voc2nnR+sylAzHIwI8w= +k8s.io/apiextensions-apiserver v0.21.5/go.mod 
h1:iiakfVazpXLW8OkF2sH/p9XGgfE7XFSQuZFJ10QlXB4= k8s.io/apimachinery v0.21.4 h1:KDq0lWZVslHkuE5I7iGAQHwpK0aDTlar1E7IWEc4CNw= k8s.io/apimachinery v0.21.4/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= +k8s.io/apimachinery v0.21.5 h1:56bnsHcUNboSCbD779GGi4Lh5kHTDFUoDrnHbhLTiaw= +k8s.io/apimachinery v0.21.5/go.mod h1:3PfBV+4PPXNs0aueD+7fHcGyhdkFFYqXeshQtsKCi+4= k8s.io/apiserver v0.21.4 h1:egJgdhW0ueq5iJSY0c5YedPvRM2Ft/D3dcXOgwvs9jY= k8s.io/apiserver v0.21.4/go.mod h1:SErUuFBBPZUcD2nsUU8hItxoYheqyYr2o/pCINEPW8g= +k8s.io/apiserver v0.21.5 h1:iEPvJ2uwmyb7C4eScOj1fgPKCyCUGgMQU5+UREE87vE= +k8s.io/apiserver v0.21.5/go.mod h1:0bWmrAx3dxUUFSEw71U91Si5obhIvBcAmf8oVZUO58E= k8s.io/cli-runtime v0.21.4 h1:kvOzx6dKg+9wRuHTzSqo8tfTV6ixZCkmi+ag54s7mn8= k8s.io/cli-runtime v0.21.4/go.mod h1:eRbLHYkdVWzvG87yrkgGd8CqX6/+fAG9DTdAqTXmlRY= +k8s.io/cli-runtime v0.21.5 h1:ZpPmrRsHvzdMzXrcr1/ZSBHLKrhS1aHyMr2hGJNlNpI= +k8s.io/cli-runtime v0.21.5/go.mod h1:TKlcXsRVImtcPDGEe72pyZtD9UgBJNupIf3hmsIeekE= k8s.io/client-go v0.21.4 h1:tcwj167If+v+pIGrCjaPG7hFo6SqFPFCCgMJy+Vm8Jc= k8s.io/client-go v0.21.4/go.mod h1:t0/eMKyUAq/DoQ7vW8NVVA00/nomlwC+eInsS8PxSew= +k8s.io/client-go v0.21.5 h1:zkVidiWVgciPKYqWpMFMjCUF+4rRXcfkKoyQS1Ue21k= +k8s.io/client-go v0.21.5/go.mod h1:EUornVlr3rBrPKXUoMPNggJdEQmvFNMpYO3Kb6432kw= k8s.io/cloud-provider v0.21.4 h1:BPGDdyz49/ohnK3QMDWBtm39QnDm+bXIP5L7mj8AHUQ= k8s.io/cloud-provider v0.21.4/go.mod h1:9ogsWpFKWcYC0sGPu0YZ3FMLZIlaGBSFDCNXxhlCF1o= +k8s.io/cloud-provider v0.21.5 h1:wLWaGA3VrHNqP8J3eimmxDdmCfLnNl0JcpRRYhKsrrU= +k8s.io/cloud-provider v0.21.5/go.mod h1:8HT2WVbR6Xr6cc/B1+wnra/kgffFtUmPjsmUu9VMyv4= k8s.io/cluster-bootstrap v0.21.4/go.mod h1:GtXGuiEtdV4XQJcscR6qQCm/vtQWkhUi3qnl9KL9jzw= +k8s.io/cluster-bootstrap v0.21.5/go.mod h1:X6MX+aOJx6NzNlEe0iUIIcFKG06qC/fqHAyzAfAgaYo= k8s.io/code-generator v0.21.4 h1:vO8jVuEGV4UF+/2s/88Qg05MokE/1QUFi/Q2YDgz++A= k8s.io/code-generator v0.21.4/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo= +k8s.io/code-generator v0.21.5 h1:7X6dJG4hzKFHChYpP02iF0XrXhenqQHc76QoKYzDZfI= +k8s.io/code-generator v0.21.5/go.mod h1:0K1k6o2ef8JD/j8LF3ZuqWLGFMHvO5psNzLLmxf7ZVE= k8s.io/component-base v0.21.4 h1:Bc0AttSyhJFVXEIHz+VX+D11j/5z7SPPhl6whiXaRzs= k8s.io/component-base v0.21.4/go.mod h1:ZKG0eHVX+tUDcaoIGpU3Vtk4TIjMddN9uhEWDmW6Nyg= +k8s.io/component-base v0.21.5 h1:icFqcFDrO9S+FQpGohzVm6qce9vlo131K0r3NhElxiQ= +k8s.io/component-base v0.21.5/go.mod h1:UyRaqQfPkBL/haEFaMWgVQvtom5TqAT+jqlFGlh6LuU= k8s.io/component-helpers v0.21.4 h1:Q6L3sQ+L5uaaUcsJkhlzU5UchcIYBZ56Y2Bq5k4qOtk= k8s.io/component-helpers v0.21.4/go.mod h1:/5TBNWmxaAymZweO1JWv3Pt5rcYJV1LbWWY0x1rDdVU= +k8s.io/component-helpers v0.21.5 h1:NzRIDAmDk0tJw2OSvDIlkXQ/j96MUKW0PF/htVH6S1g= +k8s.io/component-helpers v0.21.5/go.mod h1:sjHa2QESu4iHcL20eSKyIvCYEKdxQyS3LthUe10tt0k= k8s.io/controller-manager v0.21.4/go.mod h1:a/iL7W19zkyirHDaupk9cyC11nejVznGwZI6I8tbyQY= +k8s.io/controller-manager v0.21.5/go.mod h1:65guJmOoIEklTPwwYOmiHOsbEoAnQ7xKSZ9v+RdJC6g= k8s.io/cri-api v0.21.4/go.mod h1:ukzeKnOkrG9/+ghKZA57WeZbQfRtqlGLF5GcF3RtHZ8= +k8s.io/cri-api v0.21.5/go.mod h1:hYY+ZI/gXC3XMHIvuzRzDtb5BCEyoAOf44Z4a8GxoTk= k8s.io/csi-translation-lib v0.21.4/go.mod h1:WtxJW4/3XGhllbRCO4SRkL/MyLhjaRsL6Ds+q0pDHTg= +k8s.io/csi-translation-lib v0.21.5/go.mod h1:3ypbZqeM13aqwC1CpovssPkMhLgITWumH3n9PkdhDEA= k8s.io/gengo v0.0.0-20190306031000-7a1b7fb0289f/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo 
v0.0.0-20201214224949-b6c5ce23f027 h1:Uusb3oh8XcdzDF/ndlI4ToKTYVlkCSJP39SRY2mfRAw= @@ -2394,25 +2431,42 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= +k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-aggregator v0.21.4/go.mod h1:SykygeaVEQfqYH5IV8ve7Ia3dEGOGpGrdfD5NBi5yYI= +k8s.io/kube-aggregator v0.21.5/go.mod h1:Zs74KHeA5RYNQw88cjfMtp46VCTIgQIX56FcxDE5NFo= k8s.io/kube-controller-manager v0.21.4/go.mod h1:/wPS1gIX++/WjsIiimESnkpMqsjiIAMOpjVwjqLo7ng= +k8s.io/kube-controller-manager v0.21.5/go.mod h1:adzsSLzeO3vkaxOTdbvHIe5WJZ7naB+s6080uCToGs0= k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20210216185858-15cd8face8d6/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kube-proxy v0.21.4/go.mod h1:eUxSO/0Z/0JjKYz/aCZdwGea7lazumkTFrqS+OWcVNI= +k8s.io/kube-proxy v0.21.5/go.mod h1:brL44h883BThxzRIcIGUiOCJpTXq5Bbq/InSMYAsdB4= k8s.io/kube-scheduler v0.21.4 h1:oUVUCM+v6rum1i5vn5C3ZrqPNkp7exWiy7/Tfzbs9ZQ= k8s.io/kube-scheduler v0.21.4/go.mod h1:zFiUfgeM/dJajfHYG8Bx5fSrNAcLxMHFgN7ARdSJXqQ= +k8s.io/kube-scheduler v0.21.5 h1:yjm5Z3pIRwORBcR7HovteRhhC58+I/gCc07wO/HMYUI= +k8s.io/kube-scheduler v0.21.5/go.mod h1:7hWWLzvl0yEr+gm2Kfvt1wikhXwQb2BNylvOwzSlSMM= k8s.io/kubectl v0.21.4 h1:ODXpSKpi5C6XnJmGg96E/36KAry513v4Jr9Efg3ePJI= k8s.io/kubectl v0.21.4/go.mod h1:rRYB5HeScoGQKxZDQmus17pTSVIuqfm0D31ApET/qSM= +k8s.io/kubectl v0.21.5 h1:Ov5ivI1SanAoVPI/n6/Sik+MQTaeGp7U2S02loXBB/s= +k8s.io/kubectl v0.21.5/go.mod h1:1dDgqGZdQWH6IOLozcxQ3Tyvc5CnEL1Int6St4XEV8w= k8s.io/kubelet v0.21.4/go.mod h1:kgXUz8upYNIngMSEZP1rpg2kp4gfUrsB7ir5u9Cm4HE= +k8s.io/kubelet v0.21.5/go.mod h1:yVKsH4usaXy40Z3cZ8jknE70obOF/4aFNB7bittEEZ0= k8s.io/kubernetes v1.21.4 h1:uKnn+MDBG4Bsed/iD3L6gMkq/szAnMqeHuSjkc3WOzQ= k8s.io/kubernetes v1.21.4/go.mod h1:yNRsD2sfx76jpLKTgr0lJdVnILFWRo7b+HCo94tD48c= +k8s.io/kubernetes v1.21.5 h1:PpXs+a5FdF5Nwy+9vPjs5svULcTH923QCOjzdLqZmyw= +k8s.io/kubernetes v1.21.5/go.mod h1:o8QsgtH5UB3z9BYhcUZt9S6zjcJ4vdFsj2ACinL44Ss= k8s.io/legacy-cloud-providers v0.21.4/go.mod h1:WzvDvkWfD7lKQSaSqqaYsoY3VQeAjhXYN2telpMx8co= +k8s.io/legacy-cloud-providers v0.21.5/go.mod h1:VGdzalKK13Q8eJuhbrmPbuwyjc9vVaQ8T0asHpSJNBg= k8s.io/metrics v0.21.4/go.mod h1:uhWoVuVumUMSeCa1B1p2tm4Y4XuZIg0n24QEtB54wuA= +k8s.io/metrics v0.21.5/go.mod h1:Ew+6obDfJiQVsi6J2NkoI5jNMio/CCPC5v3pLXH8vos= k8s.io/mount-utils v0.21.4 h1:T24Y4FJ9IRkXgA+UkQHr+F+f/nm7sqdkdmdSxTtF+lw= k8s.io/mount-utils v0.21.4/go.mod h1:dwXbIPxKtTjrBEaX1aK/CMEf1KZ8GzMHpe3NEBfdFXI= +k8s.io/mount-utils v0.21.5 h1:2aapn4dg0L/naSvr9vze7vIjW6nelq3hNHxb2nLselc= +k8s.io/mount-utils v0.21.5/go.mod h1:dwXbIPxKtTjrBEaX1aK/CMEf1KZ8GzMHpe3NEBfdFXI= k8s.io/sample-apiserver v0.21.4/go.mod h1:rpVLxky91DoN2OehmyZf/IE+sgop/BBoZl78VJrrs0I= +k8s.io/sample-apiserver v0.21.5/go.mod h1:XqwON+6Rv40cwSe+Sr6ihQEcMI1MCvin8sDFAPFVQHc= k8s.io/sample-controller v0.20.4/go.mod h1:PAxO4dMU0MA62CB6ZyHM2rng/7oMOBLyF4qrDVA0Tcc= k8s.io/system-validators 
v1.4.0/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q= k8s.io/test-infra v0.0.0-20181019233642-2e10a0bbe9b3/go.mod h1:2NzXB13Ji0nqpyublHeiPC4FZwU0TknfvyaaNfl/BTA= @@ -2426,6 +2480,8 @@ k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/ k8s.io/utils v0.0.0-20210111153108-fddb29f9d009/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210527160623-6fdb442a123b h1:MSqsVQ3pZvPGTqCjptfimO2WjG7A9un2zcpiHkA6M/s= k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471 h1:DnzUXII7sVg1FJ/4JX6YDRJfLNAC7idRatPwe07suiI= +k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= knative.dev/pkg v0.0.0-20191101194912-56c2594e4f11/go.mod h1:pgODObA1dTyhNoFxPZTTjNWfx6F0aKsKzn+vaT9XO/Q= layeh.com/radius v0.0.0-20190322222518-890bc1058917/go.mod h1:fywZKyu//X7iRzaxLgPWsvc0L26IUpVvE/aeIL2JtIQ= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= @@ -2446,6 +2502,8 @@ sigs.k8s.io/cluster-api v0.2.11/go.mod h1:BCw+Pqy1sc8mQ/3d2NZM/f5BApKFCMPsnGvKol sigs.k8s.io/container-object-storage-interface-spec v0.0.0-20220211001052-50e143052de8/go.mod h1:kafkL5l/lTUrZXhVi/9p1GzpEE/ts29BkWkL3Ao33WU= sigs.k8s.io/controller-runtime v0.9.0 h1:ZIZ/dtpboPSbZYY7uUz2OzrkaBTOThx2yekLtpGB+zY= sigs.k8s.io/controller-runtime v0.9.0/go.mod h1:TgkfvrhhEw3PlI0BRL/5xM+89y3/yc0ZDfdbTl84si8= +sigs.k8s.io/controller-runtime v0.9.6 h1:EevVMlgUj4fC1NVM4+DB3iPkWkmGRNarA66neqv9Qew= +sigs.k8s.io/controller-runtime v0.9.6/go.mod h1:q6PpkM5vqQubEKUKOM6qr06oXGzOBcCby1DA9FbyZeA= sigs.k8s.io/gcp-compute-persistent-disk-csi-driver v0.7.0 h1:mvSbjzrnOd+3AB/7jvz7UNdZs5fhYorhm2H0A2HcIVg= sigs.k8s.io/gcp-compute-persistent-disk-csi-driver v0.7.0/go.mod h1:aSyCjg9bNQQxY9hnnNo10vjhZsQTkLliruvRXp3N9B4= sigs.k8s.io/kustomize/api v0.8.8 h1:G2z6JPSSjtWWgMeWSoHdXqyftJNmMmyxXpwENGoOtGE= diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go new file mode 100644 index 0000000000..c4d0f5c35b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go @@ -0,0 +1,16 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package collectors provides implementations of prometheus.Collector to +// conveniently collect process and Go-related metrics. 
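+//
+// For example, to export Go runtime and process metrics for the current
+// binary through a dedicated registry:
+//
+//	reg := prometheus.NewRegistry()
+//	reg.MustRegister(
+//		collectors.NewGoCollector(),
+//		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
+//	)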
+package collectors diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go new file mode 100644 index 0000000000..e09f149d76 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go @@ -0,0 +1,119 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collectors + +import ( + "database/sql" + + "github.com/prometheus/client_golang/prometheus" +) + +type dbStatsCollector struct { + db *sql.DB + + maxOpenConnections *prometheus.Desc + + openConnections *prometheus.Desc + inUseConnections *prometheus.Desc + idleConnections *prometheus.Desc + + waitCount *prometheus.Desc + waitDuration *prometheus.Desc + maxIdleClosed *prometheus.Desc + maxIdleTimeClosed *prometheus.Desc + maxLifetimeClosed *prometheus.Desc +} + +// NewDBStatsCollector returns a collector that exports metrics about the given *sql.DB. +// See https://golang.org/pkg/database/sql/#DBStats for more information on stats. +func NewDBStatsCollector(db *sql.DB, dbName string) prometheus.Collector { + fqName := func(name string) string { + return "go_sql_" + name + } + return &dbStatsCollector{ + db: db, + maxOpenConnections: prometheus.NewDesc( + fqName("max_open_connections"), + "Maximum number of open connections to the database.", + nil, prometheus.Labels{"db_name": dbName}, + ), + openConnections: prometheus.NewDesc( + fqName("open_connections"), + "The number of established connections both in use and idle.", + nil, prometheus.Labels{"db_name": dbName}, + ), + inUseConnections: prometheus.NewDesc( + fqName("in_use_connections"), + "The number of connections currently in use.", + nil, prometheus.Labels{"db_name": dbName}, + ), + idleConnections: prometheus.NewDesc( + fqName("idle_connections"), + "The number of idle connections.", + nil, prometheus.Labels{"db_name": dbName}, + ), + waitCount: prometheus.NewDesc( + fqName("wait_count_total"), + "The total number of connections waited for.", + nil, prometheus.Labels{"db_name": dbName}, + ), + waitDuration: prometheus.NewDesc( + fqName("wait_duration_seconds_total"), + "The total time blocked waiting for a new connection.", + nil, prometheus.Labels{"db_name": dbName}, + ), + maxIdleClosed: prometheus.NewDesc( + fqName("max_idle_closed_total"), + "The total number of connections closed due to SetMaxIdleConns.", + nil, prometheus.Labels{"db_name": dbName}, + ), + maxIdleTimeClosed: prometheus.NewDesc( + fqName("max_idle_time_closed_total"), + "The total number of connections closed due to SetConnMaxIdleTime.", + nil, prometheus.Labels{"db_name": dbName}, + ), + maxLifetimeClosed: prometheus.NewDesc( + fqName("max_lifetime_closed_total"), + "The total number of connections closed due to SetConnMaxLifetime.", + nil, prometheus.Labels{"db_name": dbName}, + ), + } +} + +// Describe implements Collector. 
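+// The maxIdleTimeClosed descriptor depends on Go 1.15's
+// sql.DBStats.MaxIdleTimeClosed, so it is sent through the build-tagged
+// describeNewInGo115 hook below and omitted on older Go versions.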
+func (c *dbStatsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- c.maxOpenConnections + ch <- c.openConnections + ch <- c.inUseConnections + ch <- c.idleConnections + ch <- c.waitCount + ch <- c.waitDuration + ch <- c.maxIdleClosed + ch <- c.maxLifetimeClosed + c.describeNewInGo115(ch) +} + +// Collect implements Collector. +func (c *dbStatsCollector) Collect(ch chan<- prometheus.Metric) { + stats := c.db.Stats() + ch <- prometheus.MustNewConstMetric(c.maxOpenConnections, prometheus.GaugeValue, float64(stats.MaxOpenConnections)) + ch <- prometheus.MustNewConstMetric(c.openConnections, prometheus.GaugeValue, float64(stats.OpenConnections)) + ch <- prometheus.MustNewConstMetric(c.inUseConnections, prometheus.GaugeValue, float64(stats.InUse)) + ch <- prometheus.MustNewConstMetric(c.idleConnections, prometheus.GaugeValue, float64(stats.Idle)) + ch <- prometheus.MustNewConstMetric(c.waitCount, prometheus.CounterValue, float64(stats.WaitCount)) + ch <- prometheus.MustNewConstMetric(c.waitDuration, prometheus.CounterValue, stats.WaitDuration.Seconds()) + ch <- prometheus.MustNewConstMetric(c.maxIdleClosed, prometheus.CounterValue, float64(stats.MaxIdleClosed)) + ch <- prometheus.MustNewConstMetric(c.maxLifetimeClosed, prometheus.CounterValue, float64(stats.MaxLifetimeClosed)) + c.collectNewInGo115(ch, stats) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_go115.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_go115.go new file mode 100644 index 0000000000..a6e6268ce3 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_go115.go @@ -0,0 +1,30 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.15 + +package collectors + +import ( + "database/sql" + + "github.com/prometheus/client_golang/prometheus" +) + +func (c *dbStatsCollector) describeNewInGo115(ch chan<- *prometheus.Desc) { + ch <- c.maxIdleTimeClosed +} + +func (c *dbStatsCollector) collectNewInGo115(ch chan<- prometheus.Metric, stats sql.DBStats) { + ch <- prometheus.MustNewConstMetric(c.maxIdleTimeClosed, prometheus.CounterValue, float64(stats.MaxIdleTimeClosed)) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_pre_go115.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_pre_go115.go new file mode 100644 index 0000000000..0568affe29 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_pre_go115.go @@ -0,0 +1,26 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.15 + +package collectors + +import ( + "database/sql" + + "github.com/prometheus/client_golang/prometheus" +) + +func (c *dbStatsCollector) describeNewInGo115(ch chan<- *prometheus.Desc) {} + +func (c *dbStatsCollector) collectNewInGo115(ch chan<- prometheus.Metric, stats sql.DBStats) {} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go new file mode 100644 index 0000000000..3aa8d0590b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go @@ -0,0 +1,57 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collectors + +import "github.com/prometheus/client_golang/prometheus" + +// NewExpvarCollector returns a newly allocated expvar Collector. +// +// An expvar Collector collects metrics from the expvar interface. It provides a +// quick way to expose numeric values that are already exported via expvar as +// Prometheus metrics. Note that the data models of expvar and Prometheus are +// fundamentally different, and that the expvar Collector is inherently slower +// than native Prometheus metrics. Thus, the expvar Collector is probably great +// for experiments and prototying, but you should seriously consider a more +// direct implementation of Prometheus metrics for monitoring production +// systems. +// +// The exports map has the following meaning: +// +// The keys in the map correspond to expvar keys, i.e. for every expvar key you +// want to export as Prometheus metric, you need an entry in the exports +// map. The descriptor mapped to each key describes how to export the expvar +// value. It defines the name and the help string of the Prometheus metric +// proxying the expvar value. The type will always be Untyped. +// +// For descriptors without variable labels, the expvar value must be a number or +// a bool. The number is then directly exported as the Prometheus sample +// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values +// that are not numbers or bools are silently ignored. +// +// If the descriptor has one variable label, the expvar value must be an expvar +// map. The keys in the expvar map become the various values of the one +// Prometheus label. The values in the expvar map must be numbers or bools again +// as above. +// +// For descriptors with more than one variable label, the expvar must be a +// nested expvar map, i.e. 
where the values of the topmost map are maps again +// etc. until a depth is reached that corresponds to the number of labels. The +// leaves of that structure must be numbers or bools as above to serve as the +// sample values. +// +// Anything that does not fit into the scheme above is silently ignored. +func NewExpvarCollector(exports map[string]*prometheus.Desc) prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. + return prometheus.NewExpvarCollector(exports) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector.go new file mode 100644 index 0000000000..edaa4e50b7 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector.go @@ -0,0 +1,69 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collectors + +import "github.com/prometheus/client_golang/prometheus" + +// NewGoCollector returns a collector that exports metrics about the current Go +// process. This includes memory stats. To collect those, runtime.ReadMemStats +// is called. This requires to “stop the world”, which usually only happens for +// garbage collection (GC). Take the following implications into account when +// deciding whether to use the Go collector: +// +// 1. The performance impact of stopping the world is the more relevant the more +// frequently metrics are collected. However, with Go1.9 or later the +// stop-the-world time per metrics collection is very short (~25µs) so that the +// performance impact will only matter in rare cases. However, with older Go +// versions, the stop-the-world duration depends on the heap size and can be +// quite significant (~1.7 ms/GiB as per +// https://go-review.googlesource.com/c/go/+/34937). +// +// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the +// metrics collection happens to coincide with GC, it will only complete after +// GC has finished. Usually, GC is fast enough to not cause problems. However, +// with a very large heap, GC might take multiple seconds, which is enough to +// cause scrape timeouts in common setups. To avoid this problem, the Go +// collector will use the memstats from a previous collection if +// runtime.ReadMemStats takes more than 1s. However, if there are no previously +// collected memstats, or their collection is more than 5m ago, the collection +// will block until runtime.ReadMemStats succeeds. +// +// NOTE: The problem is solved in Go 1.15, see +// https://github.com/golang/go/issues/19812 for the related Go issue. +func NewGoCollector() prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. + return prometheus.NewGoCollector() +} + +// NewBuildInfoCollector returns a collector collecting a single metric +// "go_build_info" with the constant value 1 and three labels "path", "version", +// and "checksum". 
Their label values contain the main module path, version, and +// checksum, respectively. The labels will only have meaningful values if the +// binary is built with Go module support and from source code retrieved from +// the source repository (rather than the local file system). This is usually +// accomplished by building from outside of GOPATH, specifying the full address +// of the main package, e.g. "GO111MODULE=on go run +// github.com/prometheus/client_golang/examples/random". If built without Go +// module support, all label values will be "unknown". If built with Go module +// support but using the source code from the local file system, the "path" will +// be set appropriately, but "checksum" will be empty and "version" will be +// "(devel)". +// +// This collector uses only the build information for the main module. See +// https://github.com/povilasv/prommod for an example of a collector for the +// module dependencies. +func NewBuildInfoCollector() prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. + return prometheus.NewBuildInfoCollector() +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go new file mode 100644 index 0000000000..24558f50a7 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go @@ -0,0 +1,56 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collectors + +import "github.com/prometheus/client_golang/prometheus" + +// ProcessCollectorOpts defines the behavior of a process metrics collector +// created with NewProcessCollector. +type ProcessCollectorOpts struct { + // PidFn returns the PID of the process the collector collects metrics + // for. It is called upon each collection. By default, the PID of the + // current process is used, as determined on construction time by + // calling os.Getpid(). + PidFn func() (int, error) + // If non-empty, each of the collected metrics is prefixed by the + // provided string and an underscore ("_"). + Namespace string + // If true, any error encountered during collection is reported as an + // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored + // and the collected metrics will be incomplete. (Possibly, no metrics + // will be collected at all.) While that's usually not desired, it is + // appropriate for the common "mix-in" of process metrics, where process + // metrics are nice to have, but failing to collect them should not + // disrupt the collection of the remaining metrics. + ReportErrors bool +} + +// NewProcessCollector returns a collector which exports the current state of +// process metrics including CPU, memory and file descriptor usage as well as +// the process start time. The detailed behavior is defined by the provided +// ProcessCollectorOpts. 
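As a quick sketch of how the two wrappers above are typically registered (the registry setup is illustrative; both constructors simply delegate to the deprecated prometheus.New*Collector functions, as their bodies in this diff show):

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(
		collectors.NewGoCollector(),        // runtime, GC and memstats metrics
		collectors.NewBuildInfoCollector(), // constant go_build_info metric
	)
}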
The zero value of ProcessCollectorOpts creates a +// collector for the current process with an empty namespace string and no error +// reporting. +// +// The collector only works on operating systems with a Linux-style proc +// filesystem and on Microsoft Windows. On other operating systems, it will not +// collect any metrics. +func NewProcessCollector(opts ProcessCollectorOpts) prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. + return prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{ + PidFn: opts.PidFn, + Namespace: opts.Namespace, + ReportErrors: opts.ReportErrors, + }) +} diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index a98fe77827..0cfcc8463c 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -145,7 +145,6 @@ func (r *Reservation) DelayFrom(now time.Time) time.Duration { // Cancel is shorthand for CancelAt(time.Now()). func (r *Reservation) Cancel() { r.CancelAt(time.Now()) - return } // CancelAt indicates that the reservation holder will not perform the reserved action @@ -186,8 +185,6 @@ func (r *Reservation) CancelAt(now time.Time) { r.lim.lastEvent = prevEvent } } - - return } // Reserve is shorthand for ReserveN(time.Now(), 1). @@ -367,20 +364,13 @@ func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, last = now } - // Avoid making delta overflow below when last is very old. - maxElapsed := lim.limit.durationFromTokens(float64(lim.burst) - lim.tokens) - elapsed := now.Sub(last) - if elapsed > maxElapsed { - elapsed = maxElapsed - } - // Calculate the new number of tokens, due to time that passed. + elapsed := now.Sub(last) delta := lim.limit.tokensFromDuration(elapsed) tokens := lim.tokens + delta if burst := float64(lim.burst); tokens > burst { tokens = burst } - return now, last, tokens } @@ -388,15 +378,11 @@ func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, // of time it takes to accumulate them at a rate of limit tokens per second. func (limit Limit) durationFromTokens(tokens float64) time.Duration { seconds := tokens / float64(limit) - return time.Nanosecond * time.Duration(1e9*seconds) + return time.Duration(float64(time.Second) * seconds) } // tokensFromDuration is a unit conversion function from a time duration to the number of tokens // which could be accumulated during that duration at a rate of limit tokens per second. func (limit Limit) tokensFromDuration(d time.Duration) float64 { - // Split the integer and fractional parts ourself to minimize rounding errors. - // See golang.org/issues/34861. - sec := float64(d/time.Second) * float64(limit) - nsec := float64(d%time.Second) * float64(limit) - return sec + nsec/1e9 + return d.Seconds() * float64(limit) } diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go index df36e3a30f..0173b6982e 100644 --- a/vendor/gopkg.in/yaml.v3/decode.go +++ b/vendor/gopkg.in/yaml.v3/decode.go @@ -100,7 +100,10 @@ func (p *parser) peek() yaml_event_type_t { if p.event.typ != yaml_NO_EVENT { return p.event.typ } - if !yaml_parser_parse(&p.parser, &p.event) { + // It's curious choice from the underlying API to generally return a + // positive result on success, but on this case return true in an error + // scenario. This was the source of bugs in the past (issue #666). 
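A side note on the x/time/rate hunk above: both unit-conversion helpers now route through float64 seconds instead of splitting integer and fractional nanoseconds. A minimal standalone sketch of the patched arithmetic (the Limit type here conceptually mirrors rate.Limit):

package main

import (
	"fmt"
	"time"
)

// Limit mirrors rate.Limit: events (tokens) per second.
type Limit float64

// durationFromTokens, as patched: time needed to accumulate tokens at limit.
func (limit Limit) durationFromTokens(tokens float64) time.Duration {
	seconds := tokens / float64(limit)
	return time.Duration(float64(time.Second) * seconds)
}

// tokensFromDuration, as patched: tokens accumulated over d at limit.
func (limit Limit) tokensFromDuration(d time.Duration) float64 {
	return d.Seconds() * float64(limit)
}

func main() {
	limit := Limit(10) // 10 tokens per second
	fmt.Println(limit.durationFromTokens(5))           // 500ms
	fmt.Println(limit.tokensFromDuration(time.Second)) // 10
}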
+ if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR { p.fail() } return p.event.typ @@ -320,6 +323,8 @@ type decoder struct { decodeCount int aliasCount int aliasDepth int + + mergedFields map[interface{}]bool } var ( @@ -808,6 +813,11 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } } + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + mapIsNew := false if out.IsNil() { out.Set(reflect.MakeMap(outt)) @@ -815,11 +825,18 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } for i := 0; i < l; i += 2 { if isMerge(n.Content[i]) { - d.merge(n.Content[i+1], out) + mergeNode = n.Content[i+1] continue } k := reflect.New(kt).Elem() if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if mergedFields[ki] { + continue + } + mergedFields[ki] = true + } kkind := k.Kind() if kkind == reflect.Interface { kkind = k.Elem().Kind() @@ -833,6 +850,12 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } } } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + d.stringMapType = stringMapType d.generalMapType = generalMapType return true @@ -844,7 +867,8 @@ func isStringMap(n *Node) bool { } l := len(n.Content) for i := 0; i < l; i += 2 { - if n.Content[i].ShortTag() != strTag { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { return false } } @@ -861,7 +885,6 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { var elemType reflect.Type if sinfo.InlineMap != -1 { inlineMap = out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) elemType = inlineMap.Type().Elem() } @@ -870,6 +893,9 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { d.prepare(n, field) } + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node var doneFields []bool if d.uniqueKeys { doneFields = make([]bool, len(sinfo.FieldsList)) @@ -879,13 +905,20 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { for i := 0; i < l; i += 2 { ni := n.Content[i] if isMerge(ni) { - d.merge(n.Content[i+1], out) + mergeNode = n.Content[i+1] continue } if !d.unmarshal(ni, name) { continue } - if info, ok := sinfo.FieldsMap[name.String()]; ok { + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { if d.uniqueKeys { if doneFields[info.Id] { d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) @@ -911,6 +944,11 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) } } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } return true } @@ -918,19 +956,29 @@ func failWantMap() { failf("map merge requires map or sequence of maps as the value") } -func (d *decoder) merge(n *Node, out reflect.Value) { - switch n.Kind { +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.mergedFields[k.Interface()] = true + } + 
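// Note on the loop above: pre-recording the parent mapping's own keys in
// mergedFields before the merge value is unmarshalled gives explicitly
// written keys precedence over keys pulled in through the "<<" merge
// indicator, which is the precedence YAML merge keys require.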
} + } + + switch merge.Kind { case MappingNode: - d.unmarshal(n, out) + d.unmarshal(merge, out) case AliasNode: - if n.Alias != nil && n.Alias.Kind != MappingNode { + if merge.Alias != nil && merge.Alias.Kind != MappingNode { failWantMap() } - d.unmarshal(n, out) + d.unmarshal(merge, out) case SequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.Content) - 1; i >= 0; i-- { - ni := n.Content[i] + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] if ni.Kind == AliasNode { if ni.Alias != nil && ni.Alias.Kind != MappingNode { failWantMap() @@ -943,6 +991,8 @@ func (d *decoder) merge(n *Node, out reflect.Value) { default: failWantMap() } + + d.mergedFields = mergedFields } func isMerge(n *Node) bool { diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go index ac66fccc05..268558a0d6 100644 --- a/vendor/gopkg.in/yaml.v3/parserc.go +++ b/vendor/gopkg.in/yaml.v3/parserc.go @@ -687,6 +687,9 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } @@ -786,7 +789,7 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { } token := peek_token(parser) - if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { return } @@ -813,6 +816,9 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } @@ -922,6 +928,9 @@ func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_ev func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } diff --git a/vendor/helm.sh/helm/v3/pkg/action/install.go b/vendor/helm.sh/helm/v3/pkg/action/install.go index af99717d13..25274fcd26 100644 --- a/vendor/helm.sh/helm/v3/pkg/action/install.go +++ b/vendor/helm.sh/helm/v3/pkg/action/install.go @@ -20,6 +20,7 @@ import ( "bytes" "fmt" "io/ioutil" + "net/url" "os" "path" "path/filepath" @@ -113,6 +114,7 @@ type ChartPathOptions struct { InsecureSkipTLSverify bool // --insecure-skip-verify Keyring string // --keyring Password string // --password + PassCredentialsAll bool // --pass-credentials RepoURL string // --repo Username string // --username Verify bool // --verify @@ -654,7 +656,7 @@ func (c *ChartPathOptions) LocateChart(name string, settings *cli.EnvSettings) ( Keyring: c.Keyring, Getters: getter.All(settings), Options: []getter.Option{ - getter.WithBasicAuth(c.Username, c.Password), + getter.WithPassCredentialsAll(c.PassCredentialsAll), getter.WithTLSClientConfig(c.CertFile, c.KeyFile, c.CaFile), getter.WithInsecureSkipVerifyTLS(c.InsecureSkipTLSverify), }, @@ -665,12 +667,34 @@ func (c *ChartPathOptions) LocateChart(name string, settings *cli.EnvSettings) ( dl.Verify = downloader.VerifyAlways } if 
c.RepoURL != "" { - chartURL, err := repo.FindChartInAuthAndTLSRepoURL(c.RepoURL, c.Username, c.Password, name, version, - c.CertFile, c.KeyFile, c.CaFile, c.InsecureSkipTLSverify, getter.All(settings)) + chartURL, err := repo.FindChartInAuthAndTLSAndPassRepoURL(c.RepoURL, c.Username, c.Password, name, version, + c.CertFile, c.KeyFile, c.CaFile, c.InsecureSkipTLSverify, c.PassCredentialsAll, getter.All(settings)) if err != nil { return "", err } name = chartURL + + // Only pass the user/pass on when the user has said to or when the + // location of the chart repo and the chart are the same domain. + u1, err := url.Parse(c.RepoURL) + if err != nil { + return "", err + } + u2, err := url.Parse(chartURL) + if err != nil { + return "", err + } + + // Host on URL (returned from url.Parse) contains the port if present. + // This check ensures credentials are not passed between different + // services on different ports. + if c.PassCredentialsAll || (u1.Scheme == u2.Scheme && u1.Host == u2.Host) { + dl.Options = append(dl.Options, getter.WithBasicAuth(c.Username, c.Password)) + } else { + dl.Options = append(dl.Options, getter.WithBasicAuth("", "")) + } + } else { + dl.Options = append(dl.Options, getter.WithBasicAuth(c.Username, c.Password)) } if err := os.MkdirAll(settings.RepositoryCache, 0755); err != nil { diff --git a/vendor/helm.sh/helm/v3/pkg/action/pull.go b/vendor/helm.sh/helm/v3/pkg/action/pull.go index 04faa3b6bb..fa1247054b 100644 --- a/vendor/helm.sh/helm/v3/pkg/action/pull.go +++ b/vendor/helm.sh/helm/v3/pkg/action/pull.go @@ -82,6 +82,7 @@ func (p *Pull) Run(chartRef string) (string, error) { Getters: getter.All(p.Settings), Options: []getter.Option{ getter.WithBasicAuth(p.Username, p.Password), + getter.WithPassCredentialsAll(p.PassCredentialsAll), getter.WithTLSClientConfig(p.CertFile, p.KeyFile, p.CaFile), getter.WithInsecureSkipVerifyTLS(p.InsecureSkipTLSverify), }, @@ -118,7 +119,7 @@ func (p *Pull) Run(chartRef string) (string, error) { } if p.RepoURL != "" { - chartURL, err := repo.FindChartInAuthAndTLSRepoURL(p.RepoURL, p.Username, p.Password, chartRef, p.Version, p.CertFile, p.KeyFile, p.CaFile, p.InsecureSkipTLSverify, getter.All(p.Settings)) + chartURL, err := repo.FindChartInAuthAndTLSAndPassRepoURL(p.RepoURL, p.Username, p.Password, chartRef, p.Version, p.CertFile, p.KeyFile, p.CaFile, p.InsecureSkipTLSverify, p.PassCredentialsAll, getter.All(p.Settings)) if err != nil { return out.String(), err } diff --git a/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go b/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go index 6c600bebb7..2c0d55a556 100644 --- a/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go +++ b/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go @@ -195,6 +195,7 @@ func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, er c.Options = append( c.Options, getter.WithBasicAuth(rc.Username, rc.Password), + getter.WithPassCredentialsAll(rc.PassCredentialsAll), ) } return u, nil @@ -224,7 +225,10 @@ func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, er c.Options = append(c.Options, getter.WithTLSClientConfig(r.Config.CertFile, r.Config.KeyFile, r.Config.CAFile)) } if r.Config.Username != "" && r.Config.Password != "" { - c.Options = append(c.Options, getter.WithBasicAuth(r.Config.Username, r.Config.Password)) + c.Options = append(c.Options, + getter.WithBasicAuth(r.Config.Username, r.Config.Password), + getter.WithPassCredentialsAll(r.Config.PassCredentialsAll), + ) } } diff --git 
a/vendor/helm.sh/helm/v3/pkg/downloader/manager.go b/vendor/helm.sh/helm/v3/pkg/downloader/manager.go index e89ac7c024..e0fd8a4220 100644 --- a/vendor/helm.sh/helm/v3/pkg/downloader/manager.go +++ b/vendor/helm.sh/helm/v3/pkg/downloader/manager.go @@ -310,7 +310,7 @@ func (m *Manager) downloadAll(deps []*chart.Dependency) error { // Any failure to resolve/download a chart should fail: // https://github.com/helm/helm/issues/1439 - churl, username, password, err := m.findChartURL(dep.Name, dep.Version, dep.Repository, repos) + churl, username, password, passcredentialsall, err := m.findChartURL(dep.Name, dep.Version, dep.Repository, repos) if err != nil { saveError = errors.Wrapf(err, "could not find %s", churl) break @@ -332,6 +332,7 @@ func (m *Manager) downloadAll(deps []*chart.Dependency) error { Getters: m.Getters, Options: []getter.Option{ getter.WithBasicAuth(username, password), + getter.WithPassCredentialsAll(passcredentialsall), }, } @@ -685,9 +686,9 @@ func (m *Manager) parallelRepoUpdate(repos []*repo.Entry) error { // repoURL is the repository to search // // If it finds a URL that is "relative", it will prepend the repoURL. -func (m *Manager) findChartURL(name, version, repoURL string, repos map[string]*repo.ChartRepository) (url, username, password string, err error) { +func (m *Manager) findChartURL(name, version, repoURL string, repos map[string]*repo.ChartRepository) (url, username, password string, passcredentialsall bool, err error) { if strings.HasPrefix(repoURL, "oci://") { - return fmt.Sprintf("%s/%s:%s", repoURL, name, version), "", "", nil + return fmt.Sprintf("%s/%s:%s", repoURL, name, version), "", "", false, nil } for _, cr := range repos { @@ -709,15 +710,16 @@ func (m *Manager) findChartURL(name, version, repoURL string, repos map[string]* } username = cr.Config.Username password = cr.Config.Password + passcredentialsall = cr.Config.PassCredentialsAll return } } url, err = repo.FindChartInRepoURL(repoURL, name, version, "", "", "", m.Getters) if err == nil { - return url, username, password, err + return url, username, password, false, err } err = errors.Errorf("chart %s not found in %s: %s", name, repoURL, err) - return url, username, password, err + return url, username, password, false, err } // findEntryByName finds an entry in the chart repository whose name matches the given name. diff --git a/vendor/helm.sh/helm/v3/pkg/getter/getter.go b/vendor/helm.sh/helm/v3/pkg/getter/getter.go index 4653484560..78add728ad 100644 --- a/vendor/helm.sh/helm/v3/pkg/getter/getter.go +++ b/vendor/helm.sh/helm/v3/pkg/getter/getter.go @@ -38,6 +38,7 @@ type options struct { insecureSkipVerifyTLS bool username string password string + passCredentialsAll bool userAgent string version string registryClient *registry.Client @@ -64,6 +65,12 @@ func WithBasicAuth(username, password string) Option { } } +func WithPassCredentialsAll(pass bool) Option { + return func(opts *options) { + opts.passCredentialsAll = pass + } +} + // WithUserAgent sets the request's User-Agent header to use the provided agent name. 
func WithUserAgent(userAgent string) Option { return func(opts *options) { diff --git a/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go b/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go index bd60629ae8..822abad2ef 100644 --- a/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go +++ b/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go @@ -20,6 +20,7 @@ import ( "crypto/tls" "io" "net/http" + "net/url" "github.com/pkg/errors" @@ -56,8 +57,24 @@ func (g *HTTPGetter) get(href string) (*bytes.Buffer, error) { req.Header.Set("User-Agent", g.opts.userAgent) } - if g.opts.username != "" && g.opts.password != "" { - req.SetBasicAuth(g.opts.username, g.opts.password) + // Before setting the basic auth credentials, make sure the URL associated + // with the basic auth is the one being fetched. + u1, err := url.Parse(g.opts.url) + if err != nil { + return buf, errors.Wrap(err, "Unable to parse getter URL") + } + u2, err := url.Parse(href) + if err != nil { + return buf, errors.Wrap(err, "Unable to parse URL getting from") + } + + // Host on URL (returned from url.Parse) contains the port if present. + // This check ensures credentials are not passed between different + // services on different ports. + if g.opts.passCredentialsAll || (u1.Scheme == u2.Scheme && u1.Host == u2.Host) { + if g.opts.username != "" && g.opts.password != "" { + req.SetBasicAuth(g.opts.username, g.opts.password) + } } client, err := g.httpClient() diff --git a/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go b/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go index 09b94fd42b..67ede93fd8 100644 --- a/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go +++ b/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go @@ -48,6 +48,7 @@ type Entry struct { KeyFile string `json:"keyFile"` CAFile string `json:"caFile"` InsecureSkipTLSverify bool `json:"insecure_skip_tls_verify"` + PassCredentialsAll bool `json:"pass_credentials_all"` } // ChartRepository represents a chart repository @@ -129,6 +130,7 @@ func (r *ChartRepository) DownloadIndexFile() (string, error) { getter.WithInsecureSkipVerifyTLS(r.Config.InsecureSkipTLSverify), getter.WithTLSClientConfig(r.Config.CertFile, r.Config.KeyFile, r.Config.CAFile), getter.WithBasicAuth(r.Config.Username, r.Config.Password), + getter.WithPassCredentialsAll(r.Config.PassCredentialsAll), ) if err != nil { return "", err @@ -217,6 +219,15 @@ func FindChartInAuthRepoURL(repoURL, username, password, chartName, chartVersion // but it also receives credentials and TLS verify flag for the chart repository. // TODO Helm 4, FindChartInAuthAndTLSRepoURL should be integrated into FindChartInAuthRepoURL. func FindChartInAuthAndTLSRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, insecureSkipTLSverify bool, getters getter.Providers) (string, error) { + return FindChartInAuthAndTLSAndPassRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile, false, false, getters) +} + +// FindChartInAuthAndTLSAndPassRepoURL finds chart in chart repository pointed by repoURL +// without adding repo to repositories, like FindChartInRepoURL, +// but it also receives credentials, TLS verify flag, and if credentials should +// be passed on to other domains. +// TODO Helm 4, FindChartInAuthAndTLSAndPassRepoURL should be integrated into FindChartInAuthRepoURL. 
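The same-origin test introduced in httpgetter.go above (and mirrored in install.go earlier in this patch) can be summarized in isolation; sameHost below is an illustrative helper, not a function this patch adds:

package main

import (
	"fmt"
	"net/url"
)

// sameHost reports whether two URLs share scheme and host. Host, as
// returned by url.Parse, includes the port when present, so different
// ports count as different services, matching the hunks in this patch.
func sameHost(a, b string) (bool, error) {
	u1, err := url.Parse(a)
	if err != nil {
		return false, err
	}
	u2, err := url.Parse(b)
	if err != nil {
		return false, err
	}
	return u1.Scheme == u2.Scheme && u1.Host == u2.Host, nil
}

func main() {
	ok, _ := sameHost("https://charts.example.com/index.yaml",
		"https://charts.example.com:8443/chart.tgz")
	fmt.Println(ok) // false: same hostname, but a different port
}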
+func FindChartInAuthAndTLSAndPassRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, insecureSkipTLSverify, passCredentialsAll bool, getters getter.Providers) (string, error) { // Download and write the index file to a temporary location buf := make([]byte, 20) @@ -227,6 +238,7 @@ func FindChartInAuthAndTLSRepoURL(repoURL, username, password, chartName, chartV URL: repoURL, Username: username, Password: password, + PassCredentialsAll: passCredentialsAll, CertFile: certFile, KeyFile: keyFile, CAFile: caFile, diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/LICENSE b/vendor/k8s.io/apimachinery/third_party/forked/golang/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/PATENTS b/vendor/k8s.io/apimachinery/third_party/forked/golang/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/k8s.io/client-go/third_party/forked/golang/LICENSE b/vendor/k8s.io/client-go/third_party/forked/golang/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/k8s.io/client-go/third_party/forked/golang/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/client-go/third_party/forked/golang/PATENTS b/vendor/k8s.io/client-go/third_party/forked/golang/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/k8s.io/client-go/third_party/forked/golang/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/k8s.io/cloud-provider/go.mod b/vendor/k8s.io/cloud-provider/go.mod index c89e3fbc58..12681e47a4 100644 --- a/vendor/k8s.io/cloud-provider/go.mod +++ b/vendor/k8s.io/cloud-provider/go.mod @@ -5,25 +5,25 @@ module k8s.io/cloud-provider go 1.16 require ( - github.com/google/go-cmp v0.5.4 + github.com/google/go-cmp v0.5.5 github.com/spf13/cobra v1.1.1 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.6.1 - k8s.io/api v0.21.4 - k8s.io/apimachinery v0.21.4 - k8s.io/apiserver v0.21.4 - k8s.io/client-go v0.21.4 - k8s.io/component-base v0.21.4 - k8s.io/controller-manager v0.21.4 + k8s.io/api v0.21.5 + k8s.io/apimachinery v0.21.5 + k8s.io/apiserver v0.21.5 + k8s.io/client-go v0.21.5 + k8s.io/component-base v0.21.5 + k8s.io/controller-manager v0.21.5 k8s.io/klog/v2 v2.8.0 k8s.io/utils v0.0.0-20201110183641-67b214c5f920 ) replace ( - k8s.io/api => k8s.io/api v0.21.4 - k8s.io/apimachinery => k8s.io/apimachinery v0.21.4 - k8s.io/apiserver => k8s.io/apiserver v0.21.4 - k8s.io/client-go => k8s.io/client-go v0.21.4 - k8s.io/component-base => k8s.io/component-base v0.21.4 - k8s.io/controller-manager => k8s.io/controller-manager v0.21.4 + k8s.io/api => k8s.io/api v0.21.5 + k8s.io/apimachinery => k8s.io/apimachinery v0.21.5 + k8s.io/apiserver => k8s.io/apiserver v0.21.5 + k8s.io/client-go => k8s.io/client-go v0.21.5 + k8s.io/component-base => k8s.io/component-base v0.21.5 + k8s.io/controller-manager => k8s.io/controller-manager v0.21.5 ) diff --git a/vendor/k8s.io/cloud-provider/go.sum b/vendor/k8s.io/cloud-provider/go.sum index 3580e1c154..515a784e90 100644 --- a/vendor/k8s.io/cloud-provider/go.sum +++ b/vendor/k8s.io/cloud-provider/go.sum @@ -160,8 +160,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -169,9 +169,8 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -340,8 +339,8 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= @@ -628,8 +627,9 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -668,18 +668,18 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.21.4 h1:WtDkzTAuI31WZKDPeIYpEUA+WeUfXAmA7gwj6nzFfbc= -k8s.io/api v0.21.4/go.mod h1:fTVGP+M4D8+00FN2cMnJqk/eb/GH53bvmNs2SVTmpFk= -k8s.io/apimachinery v0.21.4 h1:KDq0lWZVslHkuE5I7iGAQHwpK0aDTlar1E7IWEc4CNw= -k8s.io/apimachinery 
v0.21.4/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= -k8s.io/apiserver v0.21.4 h1:egJgdhW0ueq5iJSY0c5YedPvRM2Ft/D3dcXOgwvs9jY= -k8s.io/apiserver v0.21.4/go.mod h1:SErUuFBBPZUcD2nsUU8hItxoYheqyYr2o/pCINEPW8g= -k8s.io/client-go v0.21.4 h1:tcwj167If+v+pIGrCjaPG7hFo6SqFPFCCgMJy+Vm8Jc= -k8s.io/client-go v0.21.4/go.mod h1:t0/eMKyUAq/DoQ7vW8NVVA00/nomlwC+eInsS8PxSew= -k8s.io/component-base v0.21.4 h1:Bc0AttSyhJFVXEIHz+VX+D11j/5z7SPPhl6whiXaRzs= -k8s.io/component-base v0.21.4/go.mod h1:ZKG0eHVX+tUDcaoIGpU3Vtk4TIjMddN9uhEWDmW6Nyg= -k8s.io/controller-manager v0.21.4 h1:XzQn1SnU0rMYQN91CiKuhXTKMYOmp9yqmECZ/uPRlgs= -k8s.io/controller-manager v0.21.4/go.mod h1:a/iL7W19zkyirHDaupk9cyC11nejVznGwZI6I8tbyQY= +k8s.io/api v0.21.5 h1:9zp3SslPRB+rqxhGKqqTo6VsN3HX0Ype1nWV6UQQ+Sk= +k8s.io/api v0.21.5/go.mod h1:Un8C5Hemo2r3MfPOjZvwQQ9KkBbiTBUCGrjlivo9uJ0= +k8s.io/apimachinery v0.21.5 h1:56bnsHcUNboSCbD779GGi4Lh5kHTDFUoDrnHbhLTiaw= +k8s.io/apimachinery v0.21.5/go.mod h1:3PfBV+4PPXNs0aueD+7fHcGyhdkFFYqXeshQtsKCi+4= +k8s.io/apiserver v0.21.5 h1:iEPvJ2uwmyb7C4eScOj1fgPKCyCUGgMQU5+UREE87vE= +k8s.io/apiserver v0.21.5/go.mod h1:0bWmrAx3dxUUFSEw71U91Si5obhIvBcAmf8oVZUO58E= +k8s.io/client-go v0.21.5 h1:zkVidiWVgciPKYqWpMFMjCUF+4rRXcfkKoyQS1Ue21k= +k8s.io/client-go v0.21.5/go.mod h1:EUornVlr3rBrPKXUoMPNggJdEQmvFNMpYO3Kb6432kw= +k8s.io/component-base v0.21.5 h1:icFqcFDrO9S+FQpGohzVm6qce9vlo131K0r3NhElxiQ= +k8s.io/component-base v0.21.5/go.mod h1:UyRaqQfPkBL/haEFaMWgVQvtom5TqAT+jqlFGlh6LuU= +k8s.io/controller-manager v0.21.5 h1:4BECve2i35C9lSV7Eyg8jAtakcbPsJNOGT7iGyyWc4Y= +k8s.io/controller-manager v0.21.5/go.mod h1:65guJmOoIEklTPwwYOmiHOsbEoAnQ7xKSZ9v+RdJC6g= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= diff --git a/vendor/k8s.io/code-generator/go.mod b/vendor/k8s.io/code-generator/go.mod index 59b6c8a156..d6b20bec4b 100644 --- a/vendor/k8s.io/code-generator/go.mod +++ b/vendor/k8s.io/code-generator/go.mod @@ -8,8 +8,6 @@ require ( github.com/emicklei/go-restful v2.9.5+incompatible // indirect github.com/go-openapi/spec v0.19.5 github.com/gogo/protobuf v1.3.2 - github.com/golang/protobuf v1.4.3 // indirect - github.com/google/go-cmp v0.5.4 // indirect github.com/googleapis/gnostic v0.4.1 github.com/json-iterator/go v1.1.10 // indirect github.com/kr/text v0.2.0 // indirect @@ -24,7 +22,7 @@ require ( golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 // indirect golang.org/x/text v0.3.4 // indirect golang.org/x/tools v0.1.0 // indirect - google.golang.org/protobuf v1.25.0 // indirect + google.golang.org/protobuf v1.26.0 // indirect gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect gopkg.in/yaml.v2 v2.4.0 k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 diff --git a/vendor/k8s.io/code-generator/go.sum b/vendor/k8s.io/code-generator/go.sum index 50afc6f97a..bea8fa8872 100644 --- a/vendor/k8s.io/code-generator/go.sum +++ b/vendor/k8s.io/code-generator/go.sum @@ -1,13 +1,9 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod 
h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -16,8 +12,6 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= @@ -37,26 +31,13 @@ github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tF github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -100,7 +81,6 @@ github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -115,18 +95,11 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449 h1:xUIPaMhvROX9dhPvRCenIJtU78+lbEenGbgqB5hfHCQ= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -137,13 +110,10 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 h1:OgUuv8lsRpBibGNbSizVwKWlysjaNzmC9gYMhPVfqFM= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -161,10 +131,6 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -177,24 +143,9 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -212,8 +163,6 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 h1:Uusb3oh8XcdzDF/ndlI4ToKTYVlkCSJP39SRY2mfRAw= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= diff --git a/vendor/k8s.io/code-generator/third_party/forked/golang/LICENSE b/vendor/k8s.io/code-generator/third_party/forked/golang/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/k8s.io/code-generator/third_party/forked/golang/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/code-generator/third_party/forked/golang/PATENTS b/vendor/k8s.io/code-generator/third_party/forked/golang/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/k8s.io/code-generator/third_party/forked/golang/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. 
diff --git a/vendor/k8s.io/klog/v2/go.mod b/vendor/k8s.io/klog/v2/go.mod index e396e31c06..eb297b6a1e 100644 --- a/vendor/k8s.io/klog/v2/go.mod +++ b/vendor/k8s.io/klog/v2/go.mod @@ -2,4 +2,4 @@ module k8s.io/klog/v2 go 1.13 -require github.com/go-logr/logr v0.2.0 +require github.com/go-logr/logr v0.4.0 diff --git a/vendor/k8s.io/klog/v2/go.sum b/vendor/k8s.io/klog/v2/go.sum index 8dfa785428..5778f81742 100644 --- a/vendor/k8s.io/klog/v2/go.sum +++ b/vendor/k8s.io/klog/v2/go.sum @@ -1,2 +1,2 @@ -github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 23cced6250..25483fad13 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -81,6 +81,7 @@ import ( "math" "os" "path/filepath" + "reflect" "runtime" "strconv" "strings" @@ -433,7 +434,7 @@ func InitFlags(flagset *flag.FlagSet) { flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") flagset.BoolVar(&logging.addDirHeader, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header of the log messages") flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") - flagset.BoolVar(&logging.oneOutput, "one_output", logging.oneOutput, "If true, only write logs to their native severity level (vs also writing to each lower severity level") + flagset.BoolVar(&logging.oneOutput, "one_output", logging.oneOutput, "If true, only write logs to their native severity level (vs also writing to each lower severity level)") flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") @@ -772,7 +773,7 @@ func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, filter LogFil } // if loggr is specified, will call loggr.Error, otherwise output with logging module. -func (l *loggingT) errorS(err error, loggr logr.Logger, filter LogFilter, msg string, keysAndValues ...interface{}) { +func (l *loggingT) errorS(err error, loggr logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } @@ -780,11 +781,11 @@ func (l *loggingT) errorS(err error, loggr logr.Logger, filter LogFilter, msg st loggr.Error(err, msg, keysAndValues...) return } - l.printS(err, msg, keysAndValues...) + l.printS(err, errorLog, depth+1, msg, keysAndValues...) } // if loggr is specified, will call loggr.Info, otherwise output with logging module. -func (l *loggingT) infoS(loggr logr.Logger, filter LogFilter, msg string, keysAndValues ...interface{}) { +func (l *loggingT) infoS(loggr logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } @@ -792,12 +793,12 @@ func (l *loggingT) infoS(loggr logr.Logger, filter LogFilter, msg string, keysAn loggr.Info(msg, keysAndValues...) 
return } - l.printS(nil, msg, keysAndValues...) + l.printS(nil, infoLog, depth+1, msg, keysAndValues...) } // printS is called from infoS and errorS if loggr is not specified. -// if err arguments is specified, will output to errorLog severity -func (l *loggingT) printS(err error, msg string, keysAndValues ...interface{}) { +// set log severity by s +func (l *loggingT) printS(err error, s severity, depth int, msg string, keysAndValues ...interface{}) { b := &bytes.Buffer{} b.WriteString(fmt.Sprintf("%q", msg)) if err != nil { @@ -805,13 +806,7 @@ func (l *loggingT) printS(err error, msg string, keysAndValues ...interface{}) { b.WriteString(fmt.Sprintf("err=%q", err.Error())) } kvListFormat(b, keysAndValues...) - var s severity - if err == nil { - s = infoLog - } else { - s = errorLog - } - l.printDepth(s, logging.logr, nil, 2, b) + l.printDepth(s, logging.logr, nil, depth+1, b) } const missingValue = "(MISSING)" @@ -1359,14 +1354,20 @@ func (v Verbose) Infof(format string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) { if v.enabled { - logging.infoS(v.logr, v.filter, msg, keysAndValues...) + logging.infoS(v.logr, v.filter, 0, msg, keysAndValues...) } } +// InfoSDepth acts as InfoS but uses depth to determine which call frame to log. +// InfoSDepth(0, "msg") is the same as InfoS("msg"). +func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { + logging.infoS(logging.logr, logging.filter, depth, msg, keysAndValues...) +} + // Deprecated: Use ErrorS instead. func (v Verbose) Error(err error, msg string, args ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, v.filter, msg, args...) + logging.errorS(err, v.logr, v.filter, 0, msg, args...) } } @@ -1374,7 +1375,7 @@ func (v Verbose) Error(err error, msg string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, v.filter, msg, keysAndValues...) + logging.errorS(err, v.logr, v.filter, 0, msg, keysAndValues...) } } @@ -1411,7 +1412,7 @@ func Infof(format string, args ...interface{}) { // output: // >> I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kubedns" status="ready" func InfoS(msg string, keysAndValues ...interface{}) { - logging.infoS(logging.logr, logging.filter, msg, keysAndValues...) + logging.infoS(logging.logr, logging.filter, 0, msg, keysAndValues...) } // Warning logs to the WARNING and INFO logs. @@ -1472,7 +1473,13 @@ func Errorf(format string, args ...interface{}) { // output: // >> E1025 00:15:15.525108 1 controller_utils.go:114] "Failed to update pod status" err="timeout" func ErrorS(err error, msg string, keysAndValues ...interface{}) { - logging.errorS(err, logging.logr, logging.filter, msg, keysAndValues...) + logging.errorS(err, logging.logr, logging.filter, 0, msg, keysAndValues...) +} + +// ErrorSDepth acts as ErrorS but uses depth to determine which call frame to log. +// ErrorSDepth(0, "msg") is the same as ErrorS("msg"). +func ErrorSDepth(depth int, err error, msg string, keysAndValues ...interface{}) { + logging.errorS(err, logging.logr, logging.filter, depth, msg, keysAndValues...) 
} // Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, @@ -1571,6 +1578,13 @@ type KMetadata interface { // KObj returns ObjectRef from ObjectMeta func KObj(obj KMetadata) ObjectRef { + if obj == nil { + return ObjectRef{} + } + if val := reflect.ValueOf(obj); val.Kind() == reflect.Ptr && val.IsNil() { + return ObjectRef{} + } + return ObjectRef{ Name: obj.GetName(), Namespace: obj.GetNamespace(), diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go b/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go index 1140f75ce5..84cdf5e105 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go @@ -209,8 +209,9 @@ func doBindSubPath(mounter mount.Interface, subpath Subpath) (hostPath string, e // Do the bind mount options := []string{"bind"} + mountFlags := []string{"--no-canonicalize"} klog.V(5).Infof("bind mounting %q at %q", mountSource, bindPathTarget) - if err = mounter.MountSensitiveWithoutSystemd(mountSource, bindPathTarget, "" /*fstype*/, options, nil); err != nil { + if err = mounter.MountSensitiveWithoutSystemdWithMountFlags(mountSource, bindPathTarget, "" /*fstype*/, options, nil /* sensitiveOptions */, mountFlags); err != nil { return "", fmt.Errorf("error mounting %s: %s", subpath.Path, err) } success = true diff --git a/vendor/k8s.io/mount-utils/fake_mounter.go b/vendor/k8s.io/mount-utils/fake_mounter.go index 393ed043ba..55ea5e2986 100644 --- a/vendor/k8s.io/mount-utils/fake_mounter.go +++ b/vendor/k8s.io/mount-utils/fake_mounter.go @@ -136,6 +136,10 @@ func (f *FakeMounter) MountSensitiveWithoutSystemd(source string, target string, return f.MountSensitive(source, target, fstype, options, nil /* sensitiveOptions */) } +func (f *FakeMounter) MountSensitiveWithoutSystemdWithMountFlags(source string, target string, fstype string, options []string, sensitiveOptions []string, mountFlags []string) error { + return f.MountSensitive(source, target, fstype, options, nil /* sensitiveOptions */) +} + // Unmount records the unmount event and updates the in-memory mount points for FakeMounter func (f *FakeMounter) Unmount(target string) error { f.mutex.Lock() diff --git a/vendor/k8s.io/mount-utils/mount.go b/vendor/k8s.io/mount-utils/mount.go index 93b60d3f92..a882fcc739 100644 --- a/vendor/k8s.io/mount-utils/mount.go +++ b/vendor/k8s.io/mount-utils/mount.go @@ -49,6 +49,8 @@ type Interface interface { MountSensitive(source string, target string, fstype string, options []string, sensitiveOptions []string) error // MountSensitiveWithoutSystemd is the same as MountSensitive() but this method disable using systemd mount. MountSensitiveWithoutSystemd(source string, target string, fstype string, options []string, sensitiveOptions []string) error + // MountSensitiveWithoutSystemdWithMountFlags is the same as MountSensitiveWithoutSystemd() with additional mount flags + MountSensitiveWithoutSystemdWithMountFlags(source string, target string, fstype string, options []string, sensitiveOptions []string, mountFlags []string) error // Unmount unmounts given target. Unmount(target string) error // List returns a list of all mounted filesystems. This can be large. 
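
The mount-utils hunks that follow thread the new mountFlags parameter from the interface methods above down into doMount, so that doBindSubPath can pass --no-canonicalize when bind-mounting subpaths. Flags like --no-canonicalize are arguments to the mount binary itself rather than -o mount options, so they must come first on the command line. A simplified sketch of the resulting argument order (a stand-in for the vendored MakeMountArgsSensitiveWithMountFlags, omitting the sensitive-option redaction):

    package main

    import (
        "fmt"
        "strings"
    )

    // makeMountArgs assembles arguments in the order the vendored code
    // documents: mount [$mountFlags] [-t $fstype] [-o $options] [$source] $target
    func makeMountArgs(source, target, fstype string, options, mountFlags []string) []string {
        args := []string{}
        args = append(args, mountFlags...) // program flags, e.g. --no-canonicalize
        if fstype != "" {
            args = append(args, "-t", fstype)
        }
        if len(options) > 0 {
            args = append(args, "-o", strings.Join(options, ","))
        }
        if source != "" {
            args = append(args, source)
        }
        args = append(args, target)
        return args
    }

    func main() {
        args := makeMountArgs("/var/lib/src", "/mnt/target", "",
            []string{"bind"}, []string{"--no-canonicalize"})
        fmt.Println("mount", strings.Join(args, " "))
        // prints: mount --no-canonicalize -o bind /var/lib/src /mnt/target
    }
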
diff --git a/vendor/k8s.io/mount-utils/mount_linux.go b/vendor/k8s.io/mount-utils/mount_linux.go index 10a1c3f010..7097eae087 100644 --- a/vendor/k8s.io/mount-utils/mount_linux.go +++ b/vendor/k8s.io/mount-utils/mount_linux.go @@ -87,11 +87,11 @@ func (mounter *Mounter) MountSensitive(source string, target string, fstype stri mounterPath := "" bind, bindOpts, bindRemountOpts, bindRemountOptsSensitive := MakeBindOptsSensitive(options, sensitiveOptions) if bind { - err := mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindOpts, bindRemountOptsSensitive, true) + err := mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindOpts, bindRemountOptsSensitive, nil /* mountFlags */, true) if err != nil { return err } - return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts, bindRemountOptsSensitive, true) + return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts, bindRemountOptsSensitive, nil /* mountFlags */, true) } // The list of filesystems that require containerized mounter on GCI image cluster fsTypesNeedMounter := map[string]struct{}{ @@ -103,19 +103,24 @@ func (mounter *Mounter) MountSensitive(source string, target string, fstype stri if _, ok := fsTypesNeedMounter[fstype]; ok { mounterPath = mounter.mounterPath } - return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, options, sensitiveOptions, true) + return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, options, sensitiveOptions, nil /* mountFlags */, true) } // MountSensitiveWithoutSystemd is the same as MountSensitive() but disable using systemd mount. func (mounter *Mounter) MountSensitiveWithoutSystemd(source string, target string, fstype string, options []string, sensitiveOptions []string) error { + return mounter.MountSensitiveWithoutSystemdWithMountFlags(source, target, fstype, options, sensitiveOptions, nil /* mountFlags */) +} + +// MountSensitiveWithoutSystemdWithMountFlags is the same as MountSensitiveWithoutSystemd with additional mount flags. 
+func (mounter *Mounter) MountSensitiveWithoutSystemdWithMountFlags(source string, target string, fstype string, options []string, sensitiveOptions []string, mountFlags []string) error { mounterPath := "" bind, bindOpts, bindRemountOpts, bindRemountOptsSensitive := MakeBindOptsSensitive(options, sensitiveOptions) if bind { - err := mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindOpts, bindRemountOptsSensitive, false) + err := mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindOpts, bindRemountOptsSensitive, mountFlags, false) if err != nil { return err } - return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts, bindRemountOptsSensitive, false) + return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts, bindRemountOptsSensitive, mountFlags, false) } // The list of filesystems that require containerized mounter on GCI image cluster fsTypesNeedMounter := map[string]struct{}{ @@ -127,14 +132,14 @@ func (mounter *Mounter) MountSensitiveWithoutSystemd(source string, target strin if _, ok := fsTypesNeedMounter[fstype]; ok { mounterPath = mounter.mounterPath } - return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, options, sensitiveOptions, false) + return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, options, sensitiveOptions, mountFlags, false) } // doMount runs the mount command. mounterPath is the path to mounter binary if containerized mounter is used. // sensitiveOptions is an extension of options except they will not be logged (because they may contain sensitive material) // systemdMountRequired is an extension of option to decide whether uses systemd mount. -func (mounter *Mounter) doMount(mounterPath string, mountCmd string, source string, target string, fstype string, options []string, sensitiveOptions []string, systemdMountRequired bool) error { - mountArgs, mountArgsLogStr := MakeMountArgsSensitive(source, target, fstype, options, sensitiveOptions) +func (mounter *Mounter) doMount(mounterPath string, mountCmd string, source string, target string, fstype string, options []string, sensitiveOptions []string, mountFlags []string, systemdMountRequired bool) error { + mountArgs, mountArgsLogStr := MakeMountArgsSensitiveWithMountFlags(source, target, fstype, options, sensitiveOptions, mountFlags) if len(mounterPath) > 0 { mountArgs = append([]string{mountCmd}, mountArgs...) mountArgsLogStr = mountCmd + " " + mountArgsLogStr @@ -217,10 +222,22 @@ func MakeMountArgs(source, target, fstype string, options []string) (mountArgs [ // MakeMountArgsSensitive makes the arguments to the mount(8) command. // sensitiveOptions is an extension of options except they will not be logged (because they may contain sensitive material) func MakeMountArgsSensitive(source, target, fstype string, options []string, sensitiveOptions []string) (mountArgs []string, mountArgsLogStr string) { + return MakeMountArgsSensitiveWithMountFlags(source, target, fstype, options, sensitiveOptions, nil /* mountFlags */) +} + +// MakeMountArgsSensitiveWithMountFlags makes the arguments to the mount(8) command. 
+// sensitiveOptions is an extension of options except they will not be logged (because they may contain sensitive material) +// mountFlags are additional mount flags that are not related with the fstype +// and mount options +func MakeMountArgsSensitiveWithMountFlags(source, target, fstype string, options []string, sensitiveOptions []string, mountFlags []string) (mountArgs []string, mountArgsLogStr string) { // Build mount command as follows: - // mount [-t $fstype] [-o $options] [$source] $target + // mount [$mountFlags] [-t $fstype] [-o $options] [$source] $target mountArgs = []string{} mountArgsLogStr = "" + + mountArgs = append(mountArgs, mountFlags...) + mountArgsLogStr += strings.Join(mountFlags, " ") + if len(fstype) > 0 { mountArgs = append(mountArgs, "-t", fstype) mountArgsLogStr += strings.Join(mountArgs, " ") diff --git a/vendor/k8s.io/mount-utils/mount_unsupported.go b/vendor/k8s.io/mount-utils/mount_unsupported.go index 0e8e683ae3..d2aac9a748 100644 --- a/vendor/k8s.io/mount-utils/mount_unsupported.go +++ b/vendor/k8s.io/mount-utils/mount_unsupported.go @@ -53,6 +53,11 @@ func (mounter *Mounter) MountSensitiveWithoutSystemd(source string, target strin return errUnsupported } +// MountSensitiveWithoutSystemdWithMountFlags always returns an error on unsupported platforms +func (mounter *Mounter) MountSensitiveWithoutSystemdWithMountFlags(source string, target string, fstype string, options []string, sensitiveOptions []string, mountFlags []string) error { + return errUnsupported +} + // Unmount always returns an error on unsupported platforms func (mounter *Mounter) Unmount(target string) error { return errUnsupported diff --git a/vendor/k8s.io/mount-utils/mount_windows.go b/vendor/k8s.io/mount-utils/mount_windows.go index 29d3bbbd37..a893f52131 100644 --- a/vendor/k8s.io/mount-utils/mount_windows.go +++ b/vendor/k8s.io/mount-utils/mount_windows.go @@ -64,6 +64,12 @@ func (mounter *Mounter) MountSensitiveWithoutSystemd(source string, target strin return mounter.MountSensitive(source, target, fstype, options, sensitiveOptions /* sensitiveOptions */) } +// MountSensitiveWithoutSystemdWithMountFlags is the same as MountSensitiveWithoutSystemd with additional mount flags +// Windows not supported systemd mount, this function degrades to MountSensitive(). +func (mounter *Mounter) MountSensitiveWithoutSystemdWithMountFlags(source string, target string, fstype string, options []string, sensitiveOptions []string, mountFlags []string) error { + return mounter.MountSensitive(source, target, fstype, options, sensitiveOptions /* sensitiveOptions */) +} + // MountSensitive is the same as Mount() but this method allows // sensitiveOptions to be passed in a separate parameter from the normal // mount options and ensures the sensitiveOptions are never logged. This diff --git a/vendor/k8s.io/utils/internal/third_party/forked/golang/LICENSE b/vendor/k8s.io/utils/internal/third_party/forked/golang/LICENSE new file mode 100644 index 0000000000..7448756763 --- /dev/null +++ b/vendor/k8s.io/utils/internal/third_party/forked/golang/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/utils/internal/third_party/forked/golang/PATENTS b/vendor/k8s.io/utils/internal/third_party/forked/golang/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/k8s.io/utils/internal/third_party/forked/golang/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/k8s.io/utils/internal/third_party/forked/golang/net/ip.go b/vendor/k8s.io/utils/internal/third_party/forked/golang/net/ip.go new file mode 100644 index 0000000000..4340b6e748 --- /dev/null +++ b/vendor/k8s.io/utils/internal/third_party/forked/golang/net/ip.go @@ -0,0 +1,236 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// IP address manipulations +// +// IPv4 addresses are 4 bytes; IPv6 addresses are 16 bytes. +// An IPv4 address can be converted to an IPv6 address by +// adding a canonical prefix (10 zeros, 2 0xFFs). +// This library accepts either size of byte slice but always +// returns 16-byte addresses. 
+ +package net + +/////////////////////////////////////////////////////////////////////////////// +// NOTE: This file was forked because we need to maintain backwards-compatible +// IP parsing logic, which was changed in a correct but incompatible way in +// go-1.17. +// +// See https://issue.k8s.io/100895 +/////////////////////////////////////////////////////////////////////////////// + +import ( + stdnet "net" +) + +// +// Lean on the standard net lib as much as possible. +// + +type IP = stdnet.IP +type IPNet = stdnet.IPNet +type ParseError = stdnet.ParseError + +const IPv4len = stdnet.IPv4len +const IPv6len = stdnet.IPv6len + +var CIDRMask = stdnet.CIDRMask +var IPv4 = stdnet.IPv4 + +// Parse IPv4 address (d.d.d.d). +func parseIPv4(s string) IP { + var p [IPv4len]byte + for i := 0; i < IPv4len; i++ { + if len(s) == 0 { + // Missing octets. + return nil + } + if i > 0 { + if s[0] != '.' { + return nil + } + s = s[1:] + } + n, c, ok := dtoi(s) + if !ok || n > 0xFF { + return nil + } + // + // NOTE: This correct check was added for go-1.17, but is a + // backwards-incompatible change for kubernetes users, who might have + // stored data which uses these leading zeroes already. + // + // See https://issue.k8s.io/100895 + // + //if c > 1 && s[0] == '0' { + // // Reject non-zero components with leading zeroes. + // return nil + //} + s = s[c:] + p[i] = byte(n) + } + if len(s) != 0 { + return nil + } + return IPv4(p[0], p[1], p[2], p[3]) +} + +// parseIPv6 parses s as a literal IPv6 address described in RFC 4291 +// and RFC 5952. +func parseIPv6(s string) (ip IP) { + ip = make(IP, IPv6len) + ellipsis := -1 // position of ellipsis in ip + + // Might have leading ellipsis + if len(s) >= 2 && s[0] == ':' && s[1] == ':' { + ellipsis = 0 + s = s[2:] + // Might be only ellipsis + if len(s) == 0 { + return ip + } + } + + // Loop, parsing hex numbers followed by colon. + i := 0 + for i < IPv6len { + // Hex number. + n, c, ok := xtoi(s) + if !ok || n > 0xFFFF { + return nil + } + + // If followed by dot, might be in trailing IPv4. + if c < len(s) && s[c] == '.' { + if ellipsis < 0 && i != IPv6len-IPv4len { + // Not the right place. + return nil + } + if i+IPv4len > IPv6len { + // Not enough room. + return nil + } + ip4 := parseIPv4(s) + if ip4 == nil { + return nil + } + ip[i] = ip4[12] + ip[i+1] = ip4[13] + ip[i+2] = ip4[14] + ip[i+3] = ip4[15] + s = "" + i += IPv4len + break + } + + // Save this 16-bit chunk. + ip[i] = byte(n >> 8) + ip[i+1] = byte(n) + i += 2 + + // Stop at end of string. + s = s[c:] + if len(s) == 0 { + break + } + + // Otherwise must be followed by colon and more. + if s[0] != ':' || len(s) == 1 { + return nil + } + s = s[1:] + + // Look for ellipsis. + if s[0] == ':' { + if ellipsis >= 0 { // already have one + return nil + } + ellipsis = i + s = s[1:] + if len(s) == 0 { // can be at end + break + } + } + } + + // Must have used entire string. + if len(s) != 0 { + return nil + } + + // If didn't parse enough, expand ellipsis. + if i < IPv6len { + if ellipsis < 0 { + return nil + } + n := IPv6len - i + for j := i - 1; j >= ellipsis; j-- { + ip[j+n] = ip[j] + } + for j := ellipsis + n - 1; j >= ellipsis; j-- { + ip[j] = 0 + } + } else if ellipsis >= 0 { + // Ellipsis must represent at least one 0 group. + return nil + } + return ip +} + +// ParseIP parses s as an IP address, returning the result. +// The string s can be in IPv4 dotted decimal ("192.0.2.1"), IPv6 +// ("2001:db8::68"), or IPv4-mapped IPv6 ("::ffff:192.0.2.1") form. 
+// If s is not a valid textual representation of an IP address, +// ParseIP returns nil. +func ParseIP(s string) IP { + for i := 0; i < len(s); i++ { + switch s[i] { + case '.': + return parseIPv4(s) + case ':': + return parseIPv6(s) + } + } + return nil +} + +// ParseCIDR parses s as a CIDR notation IP address and prefix length, +// like "192.0.2.0/24" or "2001:db8::/32", as defined in +// RFC 4632 and RFC 4291. +// +// It returns the IP address and the network implied by the IP and +// prefix length. +// For example, ParseCIDR("192.0.2.1/24") returns the IP address +// 192.0.2.1 and the network 192.0.2.0/24. +func ParseCIDR(s string) (IP, *IPNet, error) { + i := indexByteString(s, '/') + if i < 0 { + return nil, nil, &ParseError{Type: "CIDR address", Text: s} + } + addr, mask := s[:i], s[i+1:] + iplen := IPv4len + ip := parseIPv4(addr) + if ip == nil { + iplen = IPv6len + ip = parseIPv6(addr) + } + n, i, ok := dtoi(mask) + if ip == nil || !ok || i != len(mask) || n < 0 || n > 8*iplen { + return nil, nil, &ParseError{Type: "CIDR address", Text: s} + } + m := CIDRMask(n, 8*iplen) + return ip, &IPNet{IP: ip.Mask(m), Mask: m}, nil +} + +// This is copied from go/src/internal/bytealg, which includes versions +// optimized for various platforms. Those optimizations are elided here so we +// don't have to maintain them. +func indexByteString(s string, c byte) int { + for i := 0; i < len(s); i++ { + if s[i] == c { + return i + } + } + return -1 +} diff --git a/vendor/k8s.io/utils/internal/third_party/forked/golang/net/parse.go b/vendor/k8s.io/utils/internal/third_party/forked/golang/net/parse.go new file mode 100644 index 0000000000..cc2fdcb958 --- /dev/null +++ b/vendor/k8s.io/utils/internal/third_party/forked/golang/net/parse.go @@ -0,0 +1,59 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Simple file i/o and string manipulation, to avoid +// depending on strconv and bufio and strings. + +package net + +/////////////////////////////////////////////////////////////////////////////// +// NOTE: This file was forked because it is used by other code that needed to +// be forked, not because it is used on its own. +/////////////////////////////////////////////////////////////////////////////// + +// Bigger than we need, not too big to worry about overflow +const big = 0xFFFFFF + +// Decimal to integer. +// Returns number, characters consumed, success. +func dtoi(s string) (n int, i int, ok bool) { + n = 0 + for i = 0; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ { + n = n*10 + int(s[i]-'0') + if n >= big { + return big, i, false + } + } + if i == 0 { + return 0, 0, false + } + return n, i, true +} + +// Hexadecimal to integer. +// Returns number, characters consumed, success. 
+func xtoi(s string) (n int, i int, ok bool) { + n = 0 + for i = 0; i < len(s); i++ { + if '0' <= s[i] && s[i] <= '9' { + n *= 16 + n += int(s[i] - '0') + } else if 'a' <= s[i] && s[i] <= 'f' { + n *= 16 + n += int(s[i]-'a') + 10 + } else if 'A' <= s[i] && s[i] <= 'F' { + n *= 16 + n += int(s[i]-'A') + 10 + } else { + break + } + if n >= big { + return 0, i, false + } + } + if i == 0 { + return 0, i, false + } + return n, i, true +} diff --git a/vendor/k8s.io/utils/net/ipnet.go b/vendor/k8s.io/utils/net/ipnet.go index c2e844bf5d..2f3ee37f0b 100644 --- a/vendor/k8s.io/utils/net/ipnet.go +++ b/vendor/k8s.io/utils/net/ipnet.go @@ -30,7 +30,7 @@ func ParseIPNets(specs ...string) (IPNetSet, error) { ipnetset := make(IPNetSet) for _, spec := range specs { spec = strings.TrimSpace(spec) - _, ipnet, err := net.ParseCIDR(spec) + _, ipnet, err := ParseCIDRSloppy(spec) if err != nil { return nil, err } @@ -128,7 +128,7 @@ type IPSet map[string]net.IP func ParseIPSet(items ...string) (IPSet, error) { ipset := make(IPSet) for _, item := range items { - ip := net.ParseIP(strings.TrimSpace(item)) + ip := ParseIPSloppy(strings.TrimSpace(item)) if ip == nil { return nil, fmt.Errorf("error parsing IP %q", item) } diff --git a/vendor/k8s.io/utils/net/net.go b/vendor/k8s.io/utils/net/net.go index 077e447276..b7c08e2e00 100644 --- a/vendor/k8s.io/utils/net/net.go +++ b/vendor/k8s.io/utils/net/net.go @@ -30,7 +30,7 @@ import ( func ParseCIDRs(cidrsString []string) ([]*net.IPNet, error) { cidrs := make([]*net.IPNet, 0, len(cidrsString)) for _, cidrString := range cidrsString { - _, cidr, err := net.ParseCIDR(cidrString) + _, cidr, err := ParseCIDRSloppy(cidrString) if err != nil { return nil, fmt.Errorf("failed to parse cidr value:%q with error:%v", cidrString, err) } @@ -71,7 +71,7 @@ func IsDualStackIPs(ips []net.IP) (bool, error) { func IsDualStackIPStrings(ips []string) (bool, error) { parsedIPs := make([]net.IP, 0, len(ips)) for _, ip := range ips { - parsedIP := net.ParseIP(ip) + parsedIP := ParseIPSloppy(ip) parsedIPs = append(parsedIPs, parsedIP) } return IsDualStackIPs(parsedIPs) @@ -120,14 +120,14 @@ func IsIPv6(netIP net.IP) bool { // IsIPv6String returns if ip is IPv6. func IsIPv6String(ip string) bool { - netIP := net.ParseIP(ip) + netIP := ParseIPSloppy(ip) return IsIPv6(netIP) } // IsIPv6CIDRString returns if cidr is IPv6. // This assumes cidr is a valid CIDR. func IsIPv6CIDRString(cidr string) bool { - ip, _, _ := net.ParseCIDR(cidr) + ip, _, _ := ParseCIDRSloppy(cidr) return IsIPv6(ip) } @@ -144,7 +144,7 @@ func IsIPv4(netIP net.IP) bool { // IsIPv4String returns if ip is IPv4. func IsIPv4String(ip string) bool { - netIP := net.ParseIP(ip) + netIP := ParseIPSloppy(ip) return IsIPv4(netIP) } @@ -157,7 +157,7 @@ func IsIPv4CIDR(cidr *net.IPNet) bool { // IsIPv4CIDRString returns if cidr is IPv4. // This assumes cidr is a valid CIDR. func IsIPv4CIDRString(cidr string) bool { - ip, _, _ := net.ParseCIDR(cidr) + ip, _, _ := ParseCIDRSloppy(cidr) return IsIPv4(ip) } diff --git a/vendor/k8s.io/utils/net/parse.go b/vendor/k8s.io/utils/net/parse.go new file mode 100644 index 0000000000..400d364d89 --- /dev/null +++ b/vendor/k8s.io/utils/net/parse.go @@ -0,0 +1,33 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + forkednet "k8s.io/utils/internal/third_party/forked/golang/net" +) + +// ParseIPSloppy is identical to Go's standard net.ParseIP, except that it allows +// leading '0' characters on numbers. Go used to allow this and then changed +// the behavior in 1.17. We're choosing to keep it for compat with potential +// stored values. +var ParseIPSloppy = forkednet.ParseIP + +// ParseCIDRSloppy is identical to Go's standard net.ParseCIDR, except that it allows +// leading '0' characters on numbers. Go used to allow this and then changed +// the behavior in 1.17. We're choosing to keep it for compat with potential +// stored values. +var ParseCIDRSloppy = forkednet.ParseCIDR diff --git a/vendor/k8s.io/utils/net/port.go b/vendor/k8s.io/utils/net/port.go index b4ff128e0d..7ac04f0dc9 100644 --- a/vendor/k8s.io/utils/net/port.go +++ b/vendor/k8s.io/utils/net/port.go @@ -71,7 +71,7 @@ func NewLocalPort(desc, ip string, ipFamily IPFamily, port int, protocol Protoco return nil, fmt.Errorf("Invalid IP family %s", ipFamily) } if ip != "" { - parsedIP := net.ParseIP(ip) + parsedIP := ParseIPSloppy(ip) if parsedIP == nil { return nil, fmt.Errorf("invalid ip address %s", ip) } diff --git a/vendor/modules.txt b/vendor/modules.txt index ecd2133802..bc6a071260 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -792,6 +792,7 @@ github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/mo # github.com/prometheus/client_golang v1.11.0 ## explicit github.com/prometheus/client_golang/prometheus +github.com/prometheus/client_golang/prometheus/collectors github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus/testutil @@ -989,7 +990,7 @@ golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba +# golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac golang.org/x/time/rate # golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a golang.org/x/tools/cmd/goimports @@ -1166,9 +1167,10 @@ gopkg.in/square/go-jose.v2/jwt # gopkg.in/yaml.v2 v2.4.0 ## explicit gopkg.in/yaml.v2 -# gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b +# gopkg.in/yaml.v3 v3.0.1 +## explicit gopkg.in/yaml.v3 -# helm.sh/helm/v3 v3.0.0-00010101000000-000000000000 => helm.sh/helm/v3 v3.6.0 +# helm.sh/helm/v3 v3.0.0-00010101000000-000000000000 => helm.sh/helm/v3 v3.6.1 helm.sh/helm/v3/internal/experimental/registry helm.sh/helm/v3/internal/fileutil helm.sh/helm/v3/internal/ignore @@ -1206,7 +1208,7 @@ helm.sh/helm/v3/pkg/storage helm.sh/helm/v3/pkg/storage/driver helm.sh/helm/v3/pkg/strvals helm.sh/helm/v3/pkg/time -# k8s.io/api v0.24.0 => k8s.io/api v0.21.4 +# k8s.io/api v0.24.0 => k8s.io/api v0.21.5 ## explicit k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -1254,7 +1256,7 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.21.4 => k8s.io/apiextensions-apiserver v0.21.4 +# 
k8s.io/apiextensions-apiserver v0.21.5 => k8s.io/apiextensions-apiserver v0.21.5 ## explicit k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 @@ -1266,7 +1268,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextension k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake -# k8s.io/apimachinery v0.24.3 => k8s.io/apimachinery v0.21.4 +# k8s.io/apimachinery v0.24.3 => k8s.io/apimachinery v0.21.5 ## explicit k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -1326,19 +1328,19 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.21.4 => k8s.io/apiserver v0.21.4 +# k8s.io/apiserver v0.21.5 => k8s.io/apiserver v0.21.5 ## explicit k8s.io/apiserver/pkg/authentication/serviceaccount k8s.io/apiserver/pkg/authentication/user k8s.io/apiserver/pkg/endpoints/deprecation k8s.io/apiserver/pkg/features k8s.io/apiserver/pkg/util/feature -# k8s.io/cli-runtime v0.21.4 => k8s.io/cli-runtime v0.21.4 +# k8s.io/cli-runtime v0.21.5 => k8s.io/cli-runtime v0.21.5 ## explicit k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.21.4 +# k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.21.5 ## explicit k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -1624,9 +1626,9 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.21.4 => k8s.io/cloud-provider v0.21.4 +# k8s.io/cloud-provider v0.21.5 => k8s.io/cloud-provider v0.21.5 k8s.io/cloud-provider -# k8s.io/code-generator v0.21.4 => k8s.io/code-generator v0.21.4 +# k8s.io/code-generator v0.21.5 => k8s.io/code-generator v0.21.5 ## explicit k8s.io/code-generator k8s.io/code-generator/cmd/client-gen @@ -1661,14 +1663,14 @@ k8s.io/code-generator/cmd/set-gen k8s.io/code-generator/pkg/namer k8s.io/code-generator/pkg/util k8s.io/code-generator/third_party/forked/golang/reflect -# k8s.io/component-base v0.22.0 => k8s.io/component-base v0.21.4 +# k8s.io/component-base v0.22.0 => k8s.io/component-base v0.21.5 k8s.io/component-base/config k8s.io/component-base/config/v1alpha1 k8s.io/component-base/featuregate k8s.io/component-base/metrics k8s.io/component-base/metrics/legacyregistry k8s.io/component-base/version -# k8s.io/component-helpers v0.21.4 => k8s.io/component-helpers v0.21.4 +# k8s.io/component-helpers v0.21.5 => k8s.io/component-helpers v0.21.5 ## explicit k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity @@ -1686,7 +1688,7 @@ k8s.io/gengo/parser k8s.io/gengo/types # k8s.io/klog v1.0.0 k8s.io/klog -# k8s.io/klog/v2 v2.60.1 => k8s.io/klog/v2 v2.4.0 +# k8s.io/klog/v2 v2.60.1 => k8s.io/klog/v2 v2.8.0 k8s.io/klog/v2 # k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 k8s.io/kube-openapi/cmd/openapi-gen/args @@ -1697,10 +1699,10 @@ k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/util/proto/testing 
k8s.io/kube-openapi/pkg/util/proto/validation k8s.io/kube-openapi/pkg/util/sets -# k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.21.4 +# k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.21.5 ## explicit k8s.io/kube-scheduler/extender/v1 -# k8s.io/kubectl v0.21.4 => k8s.io/kubectl v0.21.4 +# k8s.io/kubectl v0.21.5 => k8s.io/kubectl v0.21.5 ## explicit k8s.io/kubectl/pkg/cmd/testing k8s.io/kubectl/pkg/cmd/util @@ -1712,7 +1714,7 @@ k8s.io/kubectl/pkg/util/openapi/validation k8s.io/kubectl/pkg/util/templates k8s.io/kubectl/pkg/util/term k8s.io/kubectl/pkg/validation -# k8s.io/kubernetes v1.21.4 => k8s.io/kubernetes v1.21.4 +# k8s.io/kubernetes v1.21.5 => k8s.io/kubernetes v1.21.5 ## explicit k8s.io/kubernetes/pkg/api/legacyscheme k8s.io/kubernetes/pkg/api/v1/pod @@ -1750,12 +1752,13 @@ k8s.io/kubernetes/pkg/volume/util/recyclerclient k8s.io/kubernetes/pkg/volume/util/subpath k8s.io/kubernetes/pkg/volume/util/types k8s.io/kubernetes/pkg/volume/util/volumepathhandler -# k8s.io/mount-utils v0.0.0 => k8s.io/mount-utils v0.21.4 +# k8s.io/mount-utils v0.0.0 => k8s.io/mount-utils v0.21.5 k8s.io/mount-utils -# k8s.io/utils v0.0.0-20210527160623-6fdb442a123b +# k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471 k8s.io/utils/buffer k8s.io/utils/exec k8s.io/utils/integer +k8s.io/utils/internal/third_party/forked/golang/net k8s.io/utils/io k8s.io/utils/keymutex k8s.io/utils/net @@ -1767,7 +1770,7 @@ k8s.io/utils/trace # sigs.k8s.io/cluster-api v0.2.11 sigs.k8s.io/cluster-api/errors sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1 -# sigs.k8s.io/controller-runtime v0.9.0 => sigs.k8s.io/controller-runtime v0.9.0 +# sigs.k8s.io/controller-runtime v0.9.7 => sigs.k8s.io/controller-runtime v0.9.6 ## explicit sigs.k8s.io/controller-runtime/pkg/cache sigs.k8s.io/controller-runtime/pkg/cache/internal @@ -1906,32 +1909,32 @@ sigs.k8s.io/yaml # github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20220824221759-f21d3c3b4496 # github.com/portworx/torpedo => github.com/portworx/torpedo v0.0.0-20220714042817-25f6ab6dc5d1 # gopkg.in/fsnotify.v1 v1.4.7 => github.com/fsnotify/fsnotify v1.4.7 -# helm.sh/helm/v3 => helm.sh/helm/v3 v3.6.0 -# k8s.io/api => k8s.io/api v0.21.4 -# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.21.4 -# k8s.io/apimachinery => k8s.io/apimachinery v0.21.4 -# k8s.io/apiserver => k8s.io/apiserver v0.21.4 -# k8s.io/cli-runtime => k8s.io/cli-runtime v0.21.4 -# k8s.io/client-go => k8s.io/client-go v0.21.4 -# k8s.io/cloud-provider => k8s.io/cloud-provider v0.21.4 -# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.21.4 -# k8s.io/code-generator => k8s.io/code-generator v0.21.4 -# k8s.io/component-base => k8s.io/component-base v0.21.4 -# k8s.io/component-helpers => k8s.io/component-helpers v0.21.4 -# k8s.io/controller-manager => k8s.io/controller-manager v0.21.4 -# k8s.io/cri-api => k8s.io/cri-api v0.21.4 -# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.21.4 -# k8s.io/klog/v2 => k8s.io/klog/v2 v2.4.0 -# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.21.4 -# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.21.4 -# k8s.io/kube-proxy => k8s.io/kube-proxy v0.21.4 -# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.21.4 -# k8s.io/kubectl => k8s.io/kubectl v0.21.4 -# k8s.io/kubelet => k8s.io/kubelet v0.21.4 -# k8s.io/kubernetes => k8s.io/kubernetes v1.21.4 -# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.21.4 -# k8s.io/metrics => k8s.io/metrics v0.21.4 -# k8s.io/mount-utils => 
k8s.io/mount-utils v0.21.4 -# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.21.4 -# sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.9.0 +# helm.sh/helm/v3 => helm.sh/helm/v3 v3.6.1 +# k8s.io/api => k8s.io/api v0.21.5 +# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.21.5 +# k8s.io/apimachinery => k8s.io/apimachinery v0.21.5 +# k8s.io/apiserver => k8s.io/apiserver v0.21.5 +# k8s.io/cli-runtime => k8s.io/cli-runtime v0.21.5 +# k8s.io/client-go => k8s.io/client-go v0.21.5 +# k8s.io/cloud-provider => k8s.io/cloud-provider v0.21.5 +# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.21.5 +# k8s.io/code-generator => k8s.io/code-generator v0.21.5 +# k8s.io/component-base => k8s.io/component-base v0.21.5 +# k8s.io/component-helpers => k8s.io/component-helpers v0.21.5 +# k8s.io/controller-manager => k8s.io/controller-manager v0.21.5 +# k8s.io/cri-api => k8s.io/cri-api v0.21.5 +# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.21.5 +# k8s.io/klog/v2 => k8s.io/klog/v2 v2.8.0 +# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.21.5 +# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.21.5 +# k8s.io/kube-proxy => k8s.io/kube-proxy v0.21.5 +# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.21.5 +# k8s.io/kubectl => k8s.io/kubectl v0.21.5 +# k8s.io/kubelet => k8s.io/kubelet v0.21.5 +# k8s.io/kubernetes => k8s.io/kubernetes v1.21.5 +# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.21.5 +# k8s.io/metrics => k8s.io/metrics v0.21.5 +# k8s.io/mount-utils => k8s.io/mount-utils v0.21.5 +# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.21.5 +# sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.9.6 # sigs.k8s.io/sig-storage-lib-external-provisioner/v6 => sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.3.0 diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go index dee523fe23..6862fd62bd 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go @@ -69,7 +69,7 @@ type Informers interface { client.FieldIndexer } -// Informer - informer allows you interact with the underlying informer +// Informer - informer allows you interact with the underlying informer. type Informer interface { // AddEventHandler adds an event handler to the shared informer using the shared informer's resync // period. Events to a single handler are delivered sequentially, but there is no coordination @@ -82,14 +82,14 @@ type Informer interface { // AddIndexers adds more indexers to this store. If you call this after you already have data // in the store, the results are undefined. AddIndexers(indexers toolscache.Indexers) error - //HasSynced return true if the informers underlying store has synced + // HasSynced return true if the informers underlying store has synced. HasSynced() bool } -// SelectorsByObject associate a client.Object's GVK to a field/label selector +// SelectorsByObject associate a client.Object's GVK to a field/label selector. type SelectorsByObject map[client.Object]internal.Selector -// Options are the optional arguments for creating a new InformersMap object +// Options are the optional arguments for creating a new InformersMap object. 
type Options struct { // Scheme is the scheme to use for mapping objects to GroupVersionKinds Scheme *runtime.Scheme diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go index 8ec3b921d9..90647c8e33 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go @@ -50,7 +50,7 @@ type informerCache struct { *internal.InformersMap } -// Get implements Reader +// Get implements Reader. func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out client.Object) error { gvk, err := apiutil.GVKForObject(out, ip.Scheme) if err != nil { @@ -68,9 +68,8 @@ func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out clie return cache.Reader.Get(ctx, key, out) } -// List implements Reader +// List implements Reader. func (ip *informerCache) List(ctx context.Context, out client.ObjectList, opts ...client.ListOption) error { - gvk, cacheTypeObj, err := ip.objectTypeForListObject(out) if err != nil { return err @@ -130,7 +129,7 @@ func (ip *informerCache) objectTypeForListObject(list client.ObjectList) (*schem return &gvk, cacheTypeObj, nil } -// GetInformerForKind returns the informer for the GroupVersionKind +// GetInformerForKind returns the informer for the GroupVersionKind. func (ip *informerCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) { // Map the gvk to an object obj, err := ip.Scheme.New(gvk) @@ -145,7 +144,7 @@ func (ip *informerCache) GetInformerForKind(ctx context.Context, gvk schema.Grou return i.Informer, err } -// GetInformer returns the informer for the obj +// GetInformer returns the informer for the obj. func (ip *informerCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { gvk, err := apiutil.GVKForObject(obj, ip.Scheme) if err != nil { @@ -160,7 +159,7 @@ func (ip *informerCache) GetInformer(ctx context.Context, obj client.Object) (In } // NeedLeaderElection implements the LeaderElectionRunnable interface -// to indicate that this can be started without requiring the leader lock +// to indicate that this can be started without requiring the leader lock. func (ip *informerCache) NeedLeaderElection() bool { return false } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go index bd546b934a..5a495693ed 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go @@ -21,7 +21,7 @@ import ( "fmt" "reflect" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" apimeta "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" @@ -33,10 +33,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -// CacheReader is a client.Reader +// CacheReader is a client.Reader. var _ client.Reader = &CacheReader{} -// CacheReader wraps a cache.Index to implement the client.CacheReader interface for a single type +// CacheReader wraps a cache.Index to implement the client.CacheReader interface for a single type. type CacheReader struct { // indexer is the underlying indexer wrapped by this cache. 
indexer cache.Indexer @@ -48,7 +48,7 @@ type CacheReader struct { scopeName apimeta.RESTScopeName } -// Get checks the indexer for the object and writes a copy of it if found +// Get checks the indexer for the object and writes a copy of it if found. func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Object) error { if c.scopeName == apimeta.RESTScopeNameRoot { key.Namespace = "" @@ -64,7 +64,7 @@ func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Ob // Not found, return an error if !exists { // Resource gets transformed into Kind in the error anyway, so this is fine - return errors.NewNotFound(schema.GroupResource{ + return apierrors.NewNotFound(schema.GroupResource{ Group: c.groupVersionKind.Group, Resource: c.groupVersionKind.Kind, }, key.Name) @@ -93,7 +93,7 @@ func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Ob return nil } -// List lists items out of the indexer and writes them to out +// List lists items out of the indexer and writes them to out. func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...client.ListOption) error { var objs []interface{} var err error @@ -101,7 +101,8 @@ func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...cli listOpts := client.ListOptions{} listOpts.ApplyOptions(opts) - if listOpts.FieldSelector != nil { + switch { + case listOpts.FieldSelector != nil: // TODO(directxman12): support more complicated field selectors by // combining multiple indices, GetIndexers, etc field, val, requiresExact := requiresExactMatch(listOpts.FieldSelector) @@ -112,9 +113,9 @@ func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...cli // namespaced index key. Otherwise, ask for the non-namespaced variant by using the fake "all namespaces" // namespace. objs, err = c.indexer.ByIndex(FieldIndexName(field), KeyToNamespacedKey(listOpts.Namespace, val)) - } else if listOpts.Namespace != "" { + case listOpts.Namespace != "": objs, err = c.indexer.ByIndex(cache.NamespaceIndex, listOpts.Namespace) - } else { + default: objs = c.indexer.List() } if err != nil { @@ -186,7 +187,7 @@ func FieldIndexName(field string) string { return "field:" + field } -// noNamespaceNamespace is used as the "namespace" when we want to list across all namespaces +// noNamespaceNamespace is used as the "namespace" when we want to list across all namespaces. 
const allNamespacesNamespace = "__all_namespaces" // KeyToNamespacedKey prefixes the given index key with a namespace diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go index 2242d9b674..841f1657eb 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go @@ -52,7 +52,6 @@ func NewInformersMap(config *rest.Config, namespace string, selectors SelectorsByGVK, ) *InformersMap { - return &InformersMap{ structured: newStructuredInformersMap(config, scheme, mapper, resync, namespace, selectors), unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace, selectors), diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go index 5c9bd0b0a0..007a28e727 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go @@ -37,7 +37,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -// clientListWatcherFunc knows how to create a ListWatcher +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// clientListWatcherFunc knows how to create a ListWatcher. type createListWatcherFunc func(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) // newSpecificInformersMap returns a new specificInformersMap (like @@ -65,7 +69,7 @@ func newSpecificInformersMap(config *rest.Config, return ip } -// MapEntry contains the cached data for an Informer +// MapEntry contains the cached data for an Informer. 
type MapEntry struct { // Informer is the cached informer Informer cache.SharedIndexInformer @@ -270,8 +274,9 @@ func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformer ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { ip.selectors[gvk].ApplyToList(&opts) res := listObj.DeepCopyObject() - isNamespaceScoped := ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot - err := client.Get().NamespaceIfScoped(ip.namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Do(ctx).Into(res) + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors[gvk]) + isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot + err := client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Do(ctx).Into(res) return res, err }, // Setup the watch function @@ -279,8 +284,9 @@ func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformer ip.selectors[gvk].ApplyToList(&opts) // Watch needs to be set to true separately opts.Watch = true - isNamespaceScoped := ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot - return client.Get().NamespaceIfScoped(ip.namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Watch(ctx) + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors[gvk]) + isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot + return client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Watch(ctx) }, }, nil } @@ -309,8 +315,9 @@ func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInform return &cache.ListWatch{ ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { ip.selectors[gvk].ApplyToList(&opts) - if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - return dynamicClient.Resource(mapping.Resource).Namespace(ip.namespace).List(ctx, opts) + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors[gvk]) + if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { + return dynamicClient.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts) } return dynamicClient.Resource(mapping.Resource).List(ctx, opts) }, @@ -319,8 +326,9 @@ func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInform ip.selectors[gvk].ApplyToList(&opts) // Watch needs to be set to true separately opts.Watch = true - if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - return dynamicClient.Resource(mapping.Resource).Namespace(ip.namespace).Watch(ctx, opts) + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors[gvk]) + if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { + return dynamicClient.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts) } return dynamicClient.Resource(mapping.Resource).Watch(ctx, opts) }, @@ -354,8 +362,9 @@ func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersM return &cache.ListWatch{ ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { ip.selectors[gvk].ApplyToList(&opts) - if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - return client.Resource(mapping.Resource).Namespace(ip.namespace).List(ctx, opts) + namespace := 
restrictNamespaceBySelector(ip.namespace, ip.selectors[gvk]) + if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { + return client.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts) } return client.Resource(mapping.Resource).List(ctx, opts) }, @@ -364,8 +373,9 @@ func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersM ip.selectors[gvk].ApplyToList(&opts) // Watch needs to be set to true separately opts.Watch = true - if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - return client.Resource(mapping.Resource).Namespace(ip.namespace).Watch(ctx, opts) + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors[gvk]) + if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { + return client.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts) } return client.Resource(mapping.Resource).Watch(ctx, opts) }, @@ -378,7 +388,27 @@ func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersM func resyncPeriod(resync time.Duration) func() time.Duration { return func() time.Duration { // the factor will fall into [0.9, 1.1) - factor := rand.Float64()/5.0 + 0.9 + factor := rand.Float64()/5.0 + 0.9 //nolint:gosec return time.Duration(float64(resync.Nanoseconds()) * factor) } } + +// restrictNamespaceBySelector returns either a global restriction for all ListWatches +// if not default/empty, or the namespace that a ListWatch for the specific resource +// is restricted to, based on a specified field selector for metadata.namespace field. +func restrictNamespaceBySelector(namespaceOpt string, s Selector) string { + if namespaceOpt != "" { + // namespace is already restricted + return namespaceOpt + } + fieldSelector := s.Field + if fieldSelector == nil || fieldSelector.Empty() { + return "" + } + // check whether a selector includes the namespace field + value, found := fieldSelector.RequiresExactMatch("metadata.namespace") + if found { + return value + } + return "" +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go index 0e872eaf02..cd9c580008 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go @@ -23,16 +23,16 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) -// SelectorsByGVK associate a GroupVersionKind to a field/label selector +// SelectorsByGVK associate a GroupVersionKind to a field/label selector. type SelectorsByGVK map[schema.GroupVersionKind]Selector -// Selector specify the label/field selector to fill in ListOptions +// Selector specify the label/field selector to fill in ListOptions. type Selector struct { Label labels.Selector Field fields.Selector } -// ApplyToList fill in ListOptions LabelSelector and FieldSelector if needed +// ApplyToList fill in ListOptions LabelSelector and FieldSelector if needed. 
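The new `restrictNamespaceBySelector` helper lets an exact-match field selector on `metadata.namespace` stand in for an explicit namespace, so a selector-scoped cache can list and watch a single namespace instead of the whole cluster. A sketch of the apimachinery calls involved (the `team-a` namespace is made up):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	// An exact-match selector on metadata.namespace, as a user might
	// configure per GVK via the cache's selector options.
	sel := fields.OneTermEqualSelector("metadata.namespace", "team-a")

	// RequiresExactMatch is what the helper calls to recover the namespace;
	// the ListWatch is then scoped to it rather than cluster-wide.
	if ns, found := sel.RequiresExactMatch("metadata.namespace"); found {
		fmt.Println("restrict ListWatch to namespace:", ns) // team-a
	}
}
```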
func (s Selector) ApplyToList(listOpts *metav1.ListOptions) { if s.Label != nil { listOpts.LabelSelector = s.Label.String() diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go index f3520bf8d7..dc29651b01 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go @@ -22,7 +22,6 @@ import ( "time" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" apimeta "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -32,10 +31,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" ) -// NewCacheFunc - Function for creating a new cache from the options and a rest config +// NewCacheFunc - Function for creating a new cache from the options and a rest config. type NewCacheFunc func(config *rest.Config, opts Options) (Cache, error) -// a new global namespaced cache to handle cluster scoped resources +// a new global namespaced cache to handle cluster scoped resources. const globalCache = "_cluster-scope" // MultiNamespacedCacheBuilder - Builder function to create a new multi-namespaced cache. @@ -78,13 +77,13 @@ func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc { type multiNamespaceCache struct { namespaceToCache map[string]Cache Scheme *runtime.Scheme - RESTMapper meta.RESTMapper + RESTMapper apimeta.RESTMapper clusterCache Cache } var _ Cache = &multiNamespaceCache{} -// Methods for multiNamespaceCache to conform to the Informers interface +// Methods for multiNamespaceCache to conform to the Informers interface. func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { informers := map[string]Informer{} @@ -186,7 +185,7 @@ func (c *multiNamespaceCache) WaitForCacheSync(ctx context.Context) bool { func (c *multiNamespaceCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) if err != nil { - return nil + return nil //nolint:nilerr } if !isNamespaced { @@ -242,7 +241,7 @@ func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, return cache.List(ctx, list, opts...) } - listAccessor, err := meta.ListAccessor(list) + listAccessor, err := apimeta.ListAccessor(list) if err != nil { return err } @@ -265,7 +264,7 @@ func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, if err != nil { return err } - accessor, err := meta.ListAccessor(listObj) + accessor, err := apimeta.ListAccessor(listObj) if err != nil { return fmt.Errorf("object: %T must be a list type", list) } @@ -289,28 +288,28 @@ func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, return apimeta.SetList(list, allItems) } -// multiNamespaceInformer knows how to handle interacting with the underlying informer across multiple namespaces +// multiNamespaceInformer knows how to handle interacting with the underlying informer across multiple namespaces. type multiNamespaceInformer struct { namespaceToInformer map[string]Informer } var _ Informer = &multiNamespaceInformer{} -// AddEventHandler adds the handler to each namespaced informer +// AddEventHandler adds the handler to each namespaced informer. 
func (i *multiNamespaceInformer) AddEventHandler(handler toolscache.ResourceEventHandler) { for _, informer := range i.namespaceToInformer { informer.AddEventHandler(handler) } } -// AddEventHandlerWithResyncPeriod adds the handler with a resync period to each namespaced informer +// AddEventHandlerWithResyncPeriod adds the handler with a resync period to each namespaced informer. func (i *multiNamespaceInformer) AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) { for _, informer := range i.namespaceToInformer { informer.AddEventHandlerWithResyncPeriod(handler, resyncPeriod) } } -// AddIndexers adds the indexer for each namespaced informer +// AddIndexers adds the indexer for each namespaced informer. func (i *multiNamespaceInformer) AddIndexers(indexers toolscache.Indexers) error { for _, informer := range i.namespaceToInformer { err := informer.AddIndexers(indexers) @@ -321,7 +320,7 @@ func (i *multiNamespaceInformer) AddIndexers(indexers toolscache.Indexers) error return nil } -// HasSynced checks if each namespaced informer has synced +// HasSynced checks if each namespaced informer has synced. func (i *multiNamespaceInformer) HasSynced() bool { for _, informer := range i.namespaceToInformer { if ok := informer.HasSynced(); !ok { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go index bb66a6dfdd..2611a20c64 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go @@ -80,7 +80,7 @@ func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersi // (unstructured, partial, etc) // check for PartialObjectMetadata, which is analogous to unstructured, but isn't handled by ObjectKinds - _, isPartial := obj.(*metav1.PartialObjectMetadata) + _, isPartial := obj.(*metav1.PartialObjectMetadata) //nolint:ifshort _, isPartialList := obj.(*metav1.PartialObjectMetadataList) if isPartial || isPartialList { // we require that the GVK be populated in order to recognize the object @@ -134,7 +134,7 @@ func (f serializerWithDecodedGVK) DecoderToVersion(serializer runtime.Decoder, _ return serializer } -//createRestConfig copies the base config and updates needed fields for a new rest config +// createRestConfig copies the base config and updates needed fields for a new rest config. func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) *rest.Config { gv := gvk.GroupVersion() diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go index 5e9a7b5f53..56a00371ff 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go @@ -41,7 +41,7 @@ type dynamicRESTMapper struct { initOnce sync.Once } -// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper +// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper. type DynamicRESTMapperOption func(*dynamicRESTMapper) error // WithLimiter sets the RESTMapper's underlying limiter to lim. 
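The multi-namespace cache and the informer fan-out patched above are reached through the public builder. A hedged wiring sketch, assuming a stock manager setup (namespace names are illustrative):

```go
package main

import (
	"log"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

func main() {
	// One backing cache per listed namespace, plus the implicit
	// "_cluster-scope" cache for cluster-scoped resources seen above.
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		NewCache: cache.MultiNamespacedCacheBuilder([]string{"team-a", "team-b"}),
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		log.Fatal(err)
	}
}
```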
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go index 3444ab52b4..bbe36c4673 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -49,7 +49,7 @@ type WarningHandlerOptions struct { AllowDuplicateLogs bool } -// Options are creation options for a Client +// Options are creation options for a Client. type Options struct { // Scheme, if provided, will be used to map go structs to GroupVersionKinds Scheme *runtime.Scheme @@ -178,7 +178,7 @@ func (c *client) RESTMapper() meta.RESTMapper { return c.mapper } -// Create implements client.Client +// Create implements client.Client. func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) error { switch obj.(type) { case *unstructured.Unstructured: @@ -190,7 +190,7 @@ func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) e } } -// Update implements client.Client +// Update implements client.Client. func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { @@ -203,7 +203,7 @@ func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) e } } -// Delete implements client.Client +// Delete implements client.Client. func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { switch obj.(type) { case *unstructured.Unstructured: @@ -215,7 +215,7 @@ func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) e } } -// DeleteAllOf implements client.Client +// DeleteAllOf implements client.Client. func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { switch obj.(type) { case *unstructured.Unstructured: @@ -227,7 +227,7 @@ func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllO } } -// Patch implements client.Client +// Patch implements client.Client. func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { @@ -240,7 +240,7 @@ func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...Pat } } -// Get implements client.Client +// Get implements client.Client. func (c *client) Get(ctx context.Context, key ObjectKey, obj Object) error { switch obj.(type) { case *unstructured.Unstructured: @@ -254,7 +254,7 @@ func (c *client) Get(ctx context.Context, key ObjectKey, obj Object) error { } } -// List implements client.Client +// List implements client.Client. func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { switch x := obj.(type) { case *unstructured.UnstructuredList: @@ -288,20 +288,20 @@ func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) e } } -// Status implements client.StatusClient +// Status implements client.StatusClient. func (c *client) Status() StatusWriter { return &statusWriter{client: c} } -// statusWriter is client.StatusWriter that writes status subresource +// statusWriter is client.StatusWriter that writes status subresource. type statusWriter struct { client *client } -// ensure statusWriter implements client.StatusWriter +// ensure statusWriter implements client.StatusWriter. 
var _ StatusWriter = &statusWriter{} -// Update implements client.StatusWriter +// Update implements client.StatusWriter. func (sw *statusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { @@ -314,7 +314,7 @@ func (sw *statusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOp } } -// Patch implements client.Client +// Patch implements client.Client. func (sw *statusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go index b3493cb025..857a0b38a7 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go @@ -30,7 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -// clientCache creates and caches rest clients and metadata for Kubernetes types +// clientCache creates and caches rest clients and metadata for Kubernetes types. type clientCache struct { // config is the rest.Config to talk to an apiserver config *rest.Config @@ -107,7 +107,7 @@ func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) { return r, err } -// getObjMeta returns objMeta containing both type and object metadata and state +// getObjMeta returns objMeta containing both type and object metadata and state. func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) { r, err := c.getResource(obj) if err != nil { @@ -130,17 +130,17 @@ type resourceMeta struct { mapping *meta.RESTMapping } -// isNamespaced returns true if the type is namespaced +// isNamespaced returns true if the type is namespaced. func (r *resourceMeta) isNamespaced() bool { return r.mapping.Scope.Name() != meta.RESTScopeNameRoot } -// resource returns the resource name of the type +// resource returns the resource name of the type. func (r *resourceMeta) resource() string { return r.mapping.Resource.Resource } -// objMeta stores type and object information about a Kubernetes type +// objMeta stores type and object information about a Kubernetes type. type objMeta struct { // resourceMeta contains type information for the object *resourceMeta diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go index 67e80e0551..ea25ea2530 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go @@ -46,47 +46,47 @@ func (c *dryRunClient) RESTMapper() meta.RESTMapper { return c.client.RESTMapper() } -// Create implements client.Client +// Create implements client.Client. func (c *dryRunClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { return c.client.Create(ctx, obj, append(opts, DryRunAll)...) } -// Update implements client.Client +// Update implements client.Client. func (c *dryRunClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { return c.client.Update(ctx, obj, append(opts, DryRunAll)...) } -// Delete implements client.Client +// Delete implements client.Client. 
func (c *dryRunClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { return c.client.Delete(ctx, obj, append(opts, DryRunAll)...) } -// DeleteAllOf implements client.Client +// DeleteAllOf implements client.Client. func (c *dryRunClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { return c.client.DeleteAllOf(ctx, obj, append(opts, DryRunAll)...) } -// Patch implements client.Client +// Patch implements client.Client. func (c *dryRunClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { return c.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) } -// Get implements client.Client +// Get implements client.Client. func (c *dryRunClient) Get(ctx context.Context, key ObjectKey, obj Object) error { return c.client.Get(ctx, key, obj) } -// List implements client.Client +// List implements client.Client. func (c *dryRunClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { return c.client.List(ctx, obj, opts...) } -// Status implements client.StatusClient +// Status implements client.StatusClient. func (c *dryRunClient) Status() StatusWriter { return &dryRunStatusWriter{client: c.client.Status()} } -// ensure dryRunStatusWriter implements client.StatusWriter +// ensure dryRunStatusWriter implements client.StatusWriter. var _ StatusWriter = &dryRunStatusWriter{} // dryRunStatusWriter is client.StatusWriter that writes status subresource with dryRun mode @@ -95,12 +95,12 @@ type dryRunStatusWriter struct { client StatusWriter } -// Update implements client.StatusWriter +// Update implements client.StatusWriter. func (sw *dryRunStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { return sw.client.Update(ctx, obj, append(opts, DryRunAll)...) } -// Patch implements client.StatusWriter +// Patch implements client.StatusWriter. func (sw *dryRunStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { return sw.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go index 0dfea4d6c5..58c2ece15b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go @@ -30,7 +30,7 @@ import ( // ObjectKey identifies a Kubernetes Object. type ObjectKey = types.NamespacedName -// ObjectKeyFromObject returns the ObjectKey given a runtime.Object +// ObjectKeyFromObject returns the ObjectKey given a runtime.Object. func ObjectKeyFromObject(obj Object) ObjectKey { return ObjectKey{Namespace: obj.GetNamespace(), Name: obj.GetName()} } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go index c0fc72c5b7..59747463a4 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go @@ -49,7 +49,7 @@ func (mc *metadataClient) getResourceInterface(gvk schema.GroupVersionKind, ns s return mc.client.Resource(mapping.Resource).Namespace(ns), nil } -// Delete implements client.Client +// Delete implements client.Client. 
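As the dry-run hunks above show, the wrapper only appends `DryRunAll` to each mutating call. A short usage sketch (the Pod is illustrative):

```go
package main

import (
	"context"
	"log"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
	c, err := client.New(ctrl.GetConfigOrDie(), client.Options{})
	if err != nil {
		log.Fatal(err)
	}
	dry := client.NewDryRunClient(c)

	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{Name: "app", Image: "nginx"}},
		},
	}
	// Validated and defaulted by the apiserver, but never persisted.
	if err := dry.Create(context.Background(), pod); err != nil {
		log.Fatal(err)
	}
}
```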
func (mc *metadataClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { metadata, ok := obj.(*metav1.PartialObjectMetadata) if !ok { @@ -67,7 +67,7 @@ func (mc *metadataClient) Delete(ctx context.Context, obj Object, opts ...Delete return resInt.Delete(ctx, metadata.Name, *deleteOpts.AsDeleteOptions()) } -// DeleteAllOf implements client.Client +// DeleteAllOf implements client.Client. func (mc *metadataClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { metadata, ok := obj.(*metav1.PartialObjectMetadata) if !ok { @@ -85,7 +85,7 @@ func (mc *metadataClient) DeleteAllOf(ctx context.Context, obj Object, opts ...D return resInt.DeleteCollection(ctx, *deleteAllOfOpts.AsDeleteOptions(), *deleteAllOfOpts.AsListOptions()) } -// Patch implements client.Client +// Patch implements client.Client. func (mc *metadataClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { metadata, ok := obj.(*metav1.PartialObjectMetadata) if !ok { @@ -115,7 +115,7 @@ func (mc *metadataClient) Patch(ctx context.Context, obj Object, patch Patch, op return nil } -// Get implements client.Client +// Get implements client.Client. func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj Object) error { metadata, ok := obj.(*metav1.PartialObjectMetadata) if !ok { @@ -138,7 +138,7 @@ func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj Object) er return nil } -// List implements client.Client +// List implements client.Client. func (mc *metadataClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { metadata, ok := obj.(*metav1.PartialObjectMetadataList) if !ok { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go index cedcfb5961..d73cc5135a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go @@ -86,7 +86,7 @@ func isNamespaced(c Client, obj runtime.Object) (bool, error) { scope := restmapping.Scope.Name() if scope == "" { - return false, errors.New("Scope cannot be identified. Empty scope returned") + return false, errors.New("scope cannot be identified, empty scope returned") } if scope != meta.RESTScopeNameRoot { @@ -95,7 +95,7 @@ func isNamespaced(c Client, obj runtime.Object) (bool, error) { return false, nil } -// Create implements clinet.Client +// Create implements clinet.Client. func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { isNamespaceScoped, err := isNamespaced(n.client, obj) if err != nil { @@ -104,7 +104,7 @@ func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...Creat objectNamespace := obj.GetNamespace() if objectNamespace != n.namespace && objectNamespace != "" { - return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) } if isNamespaceScoped && objectNamespace == "" { @@ -113,7 +113,7 @@ func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...Creat return n.client.Create(ctx, obj, opts...) } -// Update implements client.Client +// Update implements client.Client. 
func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { isNamespaceScoped, err := isNamespaced(n.client, obj) if err != nil { @@ -122,7 +122,7 @@ func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...Updat objectNamespace := obj.GetNamespace() if objectNamespace != n.namespace && objectNamespace != "" { - return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) } if isNamespaceScoped && objectNamespace == "" { @@ -131,7 +131,7 @@ func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...Updat return n.client.Update(ctx, obj, opts...) } -// Delete implements client.Client +// Delete implements client.Client. func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { isNamespaceScoped, err := isNamespaced(n.client, obj) if err != nil { @@ -140,7 +140,7 @@ func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...Delet objectNamespace := obj.GetNamespace() if objectNamespace != n.namespace && objectNamespace != "" { - return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) } if isNamespaceScoped && objectNamespace == "" { @@ -149,7 +149,7 @@ func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...Delet return n.client.Delete(ctx, obj, opts...) } -// DeleteAllOf implements client.Client +// DeleteAllOf implements client.Client. func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { isNamespaceScoped, err := isNamespaced(n.client, obj) if err != nil { @@ -162,7 +162,7 @@ func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ... return n.client.DeleteAllOf(ctx, obj, opts...) } -// Patch implements client.Client +// Patch implements client.Client. func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { isNamespaceScoped, err := isNamespaced(n.client, obj) if err != nil { @@ -171,7 +171,7 @@ func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, o objectNamespace := obj.GetNamespace() if objectNamespace != n.namespace && objectNamespace != "" { - return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) } if isNamespaceScoped && objectNamespace == "" { @@ -180,7 +180,7 @@ func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, o return n.client.Patch(ctx, obj, patch, opts...) } -// Get implements client.Client +// Get implements client.Client. 
func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object) error { isNamespaceScoped, err := isNamespaced(n.client, obj) if err != nil { @@ -188,14 +188,14 @@ func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object) e } if isNamespaceScoped { if key.Namespace != "" && key.Namespace != n.namespace { - return fmt.Errorf("Namespace %s provided for the object %s does not match the namesapce %s on the client", key.Namespace, obj.GetName(), n.namespace) + return fmt.Errorf("namespace %s provided for the object %s does not match the namesapce %s on the client", key.Namespace, obj.GetName(), n.namespace) } key.Namespace = n.namespace } return n.client.Get(ctx, key, obj) } -// List implements client.Client +// List implements client.Client. func (n *namespacedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { if n.namespace != "" { opts = append(opts, InNamespace(n.namespace)) @@ -203,12 +203,12 @@ func (n *namespacedClient) List(ctx context.Context, obj ObjectList, opts ...Lis return n.client.List(ctx, obj, opts...) } -// Status implements client.StatusClient +// Status implements client.StatusClient. func (n *namespacedClient) Status() StatusWriter { return &namespacedClientStatusWriter{StatusClient: n.client.Status(), namespace: n.namespace, namespacedclient: n} } -// ensure namespacedClientStatusWriter implements client.StatusWriter +// ensure namespacedClientStatusWriter implements client.StatusWriter. var _ StatusWriter = &namespacedClientStatusWriter{} type namespacedClientStatusWriter struct { @@ -217,7 +217,7 @@ type namespacedClientStatusWriter struct { namespacedclient Client } -// Update implements client.StatusWriter +// Update implements client.StatusWriter. func (nsw *namespacedClientStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { isNamespaceScoped, err := isNamespaced(nsw.namespacedclient, obj) if err != nil { @@ -226,7 +226,7 @@ func (nsw *namespacedClientStatusWriter) Update(ctx context.Context, obj Object, objectNamespace := obj.GetNamespace() if objectNamespace != nsw.namespace && objectNamespace != "" { - return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) } if isNamespaceScoped && objectNamespace == "" { @@ -235,7 +235,7 @@ func (nsw *namespacedClientStatusWriter) Update(ctx context.Context, obj Object, return nsw.StatusClient.Update(ctx, obj, opts...) } -// Patch implements client.StatusWriter +// Patch implements client.StatusWriter. 
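The namespaced client patched across the last few hunks defaults an empty namespace and rejects a conflicting one with the lowercase errors introduced here. A minimal sketch, assuming the public `NewNamespacedClient` constructor from this release ("team-a" is illustrative):

```go
package main

import (
	"context"
	"fmt"
	"log"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
	base, err := client.New(ctrl.GetConfigOrDie(), client.Options{})
	if err != nil {
		log.Fatal(err)
	}
	nsClient := client.NewNamespacedClient(base, "team-a")

	// No namespace set: the wrapper fills in "team-a". Setting a different
	// namespace would return the "namespace ... does not match" error above.
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "settings"}}
	if err := nsClient.Create(context.Background(), cm); err != nil {
		log.Fatal(err)
	}
	fmt.Println("created in:", cm.GetNamespace())
}
```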
func (nsw *namespacedClientStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { isNamespaceScoped, err := isNamespaced(nsw.namespacedclient, obj) if err != nil { @@ -244,7 +244,7 @@ func (nsw *namespacedClientStatusWriter) Patch(ctx context.Context, obj Object, objectNamespace := obj.GetNamespace() if objectNamespace != nsw.namespace && objectNamespace != "" { - return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) } if isNamespaceScoped && objectNamespace == "" { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go index f253276466..aa2299eac0 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -158,7 +158,7 @@ func (o *CreateOptions) ApplyOptions(opts []CreateOption) *CreateOptions { return o } -// ApplyToCreate implements CreateOption +// ApplyToCreate implements CreateOption. func (o *CreateOptions) ApplyToCreate(co *CreateOptions) { if o.DryRun != nil { co.DryRun = o.DryRun @@ -239,7 +239,7 @@ func (o *DeleteOptions) ApplyOptions(opts []DeleteOption) *DeleteOptions { var _ DeleteOption = &DeleteOptions{} -// ApplyToDelete implements DeleteOption +// ApplyToDelete implements DeleteOption. func (o *DeleteOptions) ApplyToDelete(do *DeleteOptions) { if o.GracePeriodSeconds != nil { do.GracePeriodSeconds = o.GracePeriodSeconds @@ -349,7 +349,7 @@ type ListOptions struct { var _ ListOption = &ListOptions{} -// ApplyToList implements ListOption for ListOptions +// ApplyToList implements ListOption for ListOptions. func (o *ListOptions) ApplyToList(lo *ListOptions) { if o.LabelSelector != nil { lo.LabelSelector = o.LabelSelector @@ -569,7 +569,7 @@ func (o *UpdateOptions) ApplyOptions(opts []UpdateOption) *UpdateOptions { var _ UpdateOption = &UpdateOptions{} -// ApplyToUpdate implements UpdateOption +// ApplyToUpdate implements UpdateOption. func (o *UpdateOptions) ApplyToUpdate(uo *UpdateOptions) { if o.DryRun != nil { uo.DryRun = o.DryRun @@ -636,7 +636,7 @@ func (o *PatchOptions) AsPatchOptions() *metav1.PatchOptions { var _ PatchOption = &PatchOptions{} -// ApplyToPatch implements PatchOptions +// ApplyToPatch implements PatchOptions. func (o *PatchOptions) ApplyToPatch(po *PatchOptions) { if o.DryRun != nil { po.DryRun = o.DryRun @@ -688,7 +688,7 @@ func (o *DeleteAllOfOptions) ApplyOptions(opts []DeleteAllOfOption) *DeleteAllOf var _ DeleteAllOfOption = &DeleteAllOfOptions{} -// ApplyToDeleteAllOf implements DeleteAllOfOption +// ApplyToDeleteAllOf implements DeleteAllOfOption. func (o *DeleteAllOfOptions) ApplyToDeleteAllOf(do *DeleteAllOfOptions) { o.ApplyToList(&do.ListOptions) o.ApplyToDelete(&do.DeleteOptions) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go index a1b32653ca..dde7b21f25 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go @@ -33,7 +33,7 @@ type typedClient struct { paramCodec runtime.ParameterCodec } -// Create implements client.Client +// Create implements client.Client. 
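The `ApplyToCreate`/`ApplyToList`/`ApplyToUpdate` implementations touched above are what let the client's functional options compose. Typical call-site usage, sketched (namespace and labels are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"log"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
	c, err := client.New(ctrl.GetConfigOrDie(), client.Options{})
	if err != nil {
		log.Fatal(err)
	}
	var pods corev1.PodList
	// Each option's ApplyToList fills the shared ListOptions, which is
	// then converted to metav1.ListOptions for the actual request.
	if err := c.List(context.Background(), &pods,
		client.InNamespace("team-a"),
		client.MatchingLabels{"app": "web"},
	); err != nil {
		log.Fatal(err)
	}
	fmt.Println("matched pods:", len(pods.Items))
}
```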
func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { @@ -51,7 +51,7 @@ func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOpti Into(obj) } -// Update implements client.Client +// Update implements client.Client. func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { @@ -70,7 +70,7 @@ func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOpti Into(obj) } -// Delete implements client.Client +// Delete implements client.Client. func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { @@ -89,7 +89,7 @@ func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOpti Error() } -// DeleteAllOf implements client.Client +// DeleteAllOf implements client.Client. func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { @@ -108,7 +108,7 @@ func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...Delet Error() } -// Patch implements client.Client +// Patch implements client.Client. func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { @@ -131,7 +131,7 @@ func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts . Into(obj) } -// Get implements client.Client +// Get implements client.Client. func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object) error { r, err := c.cache.getResource(obj) if err != nil { @@ -143,7 +143,7 @@ func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object) error Name(key.Name).Do(ctx).Into(obj) } -// List implements client.Client +// List implements client.Client. func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { r, err := c.cache.getResource(obj) if err != nil { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go index f8fb3ccec1..dcf15be275 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go @@ -36,7 +36,7 @@ type unstructuredClient struct { paramCodec runtime.ParameterCodec } -// Create implements client.Client +// Create implements client.Client. func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { u, ok := obj.(*unstructured.Unstructured) if !ok { @@ -64,7 +64,7 @@ func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...Cr return result } -// Update implements client.Client +// Update implements client.Client. func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { u, ok := obj.(*unstructured.Unstructured) if !ok { @@ -93,7 +93,7 @@ func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...Up return result } -// Delete implements client.Client +// Delete implements client.Client. 
func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { _, ok := obj.(*unstructured.Unstructured) if !ok { @@ -116,7 +116,7 @@ func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...De Error() } -// DeleteAllOf implements client.Client +// DeleteAllOf implements client.Client. func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { _, ok := obj.(*unstructured.Unstructured) if !ok { @@ -139,7 +139,7 @@ func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts Error() } -// Patch implements client.Client +// Patch implements client.Client. func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { _, ok := obj.(*unstructured.Unstructured) if !ok { @@ -167,7 +167,7 @@ func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch Into(obj) } -// Get implements client.Client +// Get implements client.Client. func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object) error { u, ok := obj.(*unstructured.Unstructured) if !ok { @@ -193,7 +193,7 @@ func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object return result } -// List implements client.Client +// List implements client.Client. func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { u, ok := obj.(*unstructured.UnstructuredList) if !ok { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go index 76fa72ad76..dfd0fa9dd8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go @@ -139,10 +139,10 @@ type Options struct { newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) } -// Option can be used to manipulate Options +// Option can be used to manipulate Options. type Option func(*Options) -// New constructs a brand new cluster +// New constructs a brand new cluster. func New(config *rest.Config, opts ...Option) (Cluster, error) { if config == nil { return nil, errors.New("must specify Config") @@ -204,7 +204,7 @@ func New(config *rest.Config, opts ...Option) (Cluster, error) { }, nil } -// setOptionsDefaults set default values for Options fields +// setOptionsDefaults set default values for Options fields. func setOptionsDefaults(options Options) Options { // Use the Kubernetes client-go scheme if none is specified if options.Scheme == nil { @@ -252,10 +252,10 @@ func setOptionsDefaults(options Options) Options { return options } -// NewClientFunc allows a user to define how to create a client +// NewClientFunc allows a user to define how to create a client. type NewClientFunc func(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) -// DefaultNewClient creates the default caching client +// DefaultNewClient creates the default caching client. 
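The typed and unstructured clients above are selected by the concrete type handed to the general client, so the same `Get` call takes either path. A sketch, assuming a Deployment `web` exists in `default`:

```go
package main

import (
	"context"
	"log"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
	c, err := client.New(ctrl.GetConfigOrDie(), client.Options{})
	if err != nil {
		log.Fatal(err)
	}
	key := client.ObjectKey{Namespace: "default", Name: "web"}

	// Typed path: dispatched to typedClient.
	dep := &appsv1.Deployment{}
	if err := c.Get(context.Background(), key, dep); err != nil {
		log.Println(err)
	}

	// Unstructured path: the same call, dispatched to unstructuredClient.
	u := &unstructured.Unstructured{}
	u.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})
	if err := c.Get(context.Background(), key, u); err != nil {
		log.Println(err)
	}
}
```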
func DefaultNewClient(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) { c, err := client.New(config, options) if err != nil { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go index fce75d7bfb..f23b02df00 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go @@ -28,7 +28,7 @@ import ( ) // ControllerManagerConfiguration defines the functions necessary to parse a config file -// and to configure the Options struct for the ctrl.Manager +// and to configure the Options struct for the ctrl.Manager. type ControllerManagerConfiguration interface { runtime.Object @@ -37,7 +37,7 @@ type ControllerManagerConfiguration interface { } // DeferredFileLoader is used to configure the decoder for loading controller -// runtime component config types +// runtime component config types. type DeferredFileLoader struct { ControllerManagerConfiguration path string @@ -62,7 +62,7 @@ func File() *DeferredFileLoader { } } -// Complete will use sync.Once to set the scheme +// Complete will use sync.Once to set the scheme. func (d *DeferredFileLoader) Complete() (v1alpha1.ControllerManagerConfigurationSpec, error) { d.once.Do(d.loadFile) if d.err != nil { @@ -71,25 +71,25 @@ func (d *DeferredFileLoader) Complete() (v1alpha1.ControllerManagerConfiguration return d.ControllerManagerConfiguration.Complete() } -// AtPath will set the path to load the file for the decoder +// AtPath will set the path to load the file for the decoder. func (d *DeferredFileLoader) AtPath(path string) *DeferredFileLoader { d.path = path return d } -// OfKind will set the type to be used for decoding the file into +// OfKind will set the type to be used for decoding the file into. func (d *DeferredFileLoader) OfKind(obj ControllerManagerConfiguration) *DeferredFileLoader { d.ControllerManagerConfiguration = obj return d } -// InjectScheme will configure the scheme to be used for decoding the file +// InjectScheme will configure the scheme to be used for decoding the file. func (d *DeferredFileLoader) InjectScheme(scheme *runtime.Scheme) error { d.scheme = scheme return nil } -// loadFile is used from the mutex.Once to load the file +// loadFile is used from the mutex.Once to load the file. func (d *DeferredFileLoader) loadFile() { if d.scheme == nil { d.err = fmt.Errorf("scheme not supplied to controller configuration loader") @@ -109,6 +109,4 @@ func (d *DeferredFileLoader) loadFile() { if err = runtime.DecodeInto(codecs.UniversalDecoder(), content, d.ControllerManagerConfiguration); err != nil { d.err = fmt.Errorf("could not decode file into runtime.Object") } - - return } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go index 72baa27f19..9efdbc0668 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go @@ -22,10 +22,10 @@ import ( ) var ( - // GroupVersion is group version used to register these objects + // GroupVersion is group version used to register these objects. 
GroupVersion = schema.GroupVersion{Group: "controller-runtime.sigs.k8s.io", Version: "v1alpha1"} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} // AddToScheme adds the types in this group-version to the given scheme. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go index e13f1c0090..e67b62e514 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go @@ -24,7 +24,7 @@ import ( configv1alpha1 "k8s.io/component-base/config/v1alpha1" ) -// ControllerManagerConfigurationSpec defines the desired state of GenericControllerManagerConfiguration +// ControllerManagerConfigurationSpec defines the desired state of GenericControllerManagerConfiguration. type ControllerManagerConfigurationSpec struct { // SyncPeriod determines the minimum frequency at which watched resources are // reconciled. A lower period will correct entropy more quickly, but reduce @@ -96,7 +96,7 @@ type ControllerConfigurationSpec struct { CacheSyncTimeout *time.Duration `json:"cacheSyncTimeout,omitempty"` } -// ControllerMetrics defines the metrics configs +// ControllerMetrics defines the metrics configs. type ControllerMetrics struct { // BindAddress is the TCP address that the controller should bind to // for serving prometheus metrics. @@ -105,7 +105,7 @@ type ControllerMetrics struct { BindAddress string `json:"bindAddress,omitempty"` } -// ControllerHealth defines the health configs +// ControllerHealth defines the health configs. type ControllerHealth struct { // HealthProbeBindAddress is the TCP address that the controller should bind to // for serving health probes @@ -121,7 +121,7 @@ type ControllerHealth struct { LivenessEndpointName string `json:"livenessEndpointName,omitempty"` } -// ControllerWebhook defines the webhook server for the controller +// ControllerWebhook defines the webhook server for the controller. type ControllerWebhook struct { // Port is the port that the webhook server serves at. // It is used to set webhook.Server.Port. @@ -143,7 +143,7 @@ type ControllerWebhook struct { // +kubebuilder:object:root=true -// ControllerManagerConfiguration is the Schema for the GenericControllerManagerConfigurations API +// ControllerManagerConfiguration is the Schema for the GenericControllerManagerConfigurations API. type ControllerManagerConfiguration struct { metav1.TypeMeta `json:",inline"` @@ -151,7 +151,7 @@ type ControllerManagerConfiguration struct { ControllerManagerConfigurationSpec `json:",inline"` } -// Complete returns the configuration for controller-runtime +// Complete returns the configuration for controller-runtime. 
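The `DeferredFileLoader` and config types above back the manager's file-based configuration. A hedged sketch of the usual wiring (the path is illustrative, and the `ConfigFile`/`AndFrom` plumbing is assumed from this controller-runtime release):

```go
package main

import (
	"log"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/config/v1alpha1"
)

func main() {
	ctrlConfig := v1alpha1.ControllerManagerConfiguration{}
	// Decode the file once (sync.Once in Complete) and fold it into Options.
	options, err := ctrl.Options{}.AndFrom(
		ctrl.ConfigFile().AtPath("/etc/manager/config.yaml").OfKind(&ctrlConfig))
	if err != nil {
		log.Fatal(err)
	}
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), options)
	if err != nil {
		log.Fatal(err)
	}
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		log.Fatal(err)
	}
}
```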
func (c *ControllerManagerConfigurationSpec) Complete() (ControllerManagerConfigurationSpec, error) { return *c, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go index 85d8d6d54c..c9e07562a3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go @@ -32,7 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" ) -// Options are the arguments for creating a new Controller +// Options are the arguments for creating a new Controller. type Options struct { // MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1. MaxConcurrentReconciles int diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go index fb8987cfe9..e6d3a4eaab 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go @@ -35,7 +35,7 @@ var _ EventHandler = &EnqueueRequestForObject{} // Controllers that have associated Resources (e.g. CRDs) to reconcile the associated Resource. type EnqueueRequestForObject struct{} -// Create implements EventHandler +// Create implements EventHandler. func (e *EnqueueRequestForObject) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { if evt.Object == nil { enqueueLog.Error(nil, "CreateEvent received with no metadata", "event", evt) @@ -47,24 +47,25 @@ func (e *EnqueueRequestForObject) Create(evt event.CreateEvent, q workqueue.Rate }}) } -// Update implements EventHandler +// Update implements EventHandler. func (e *EnqueueRequestForObject) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { - if evt.ObjectNew != nil { + switch { + case evt.ObjectNew != nil: q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ Name: evt.ObjectNew.GetName(), Namespace: evt.ObjectNew.GetNamespace(), }}) - } else if evt.ObjectOld != nil { + case evt.ObjectOld != nil: q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ Name: evt.ObjectOld.GetName(), Namespace: evt.ObjectOld.GetNamespace(), }}) - } else { + default: enqueueLog.Error(nil, "UpdateEvent received with no metadata", "event", evt) } } -// Delete implements EventHandler +// Delete implements EventHandler. func (e *EnqueueRequestForObject) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { if evt.Object == nil { enqueueLog.Error(nil, "DeleteEvent received with no metadata", "event", evt) @@ -76,7 +77,7 @@ func (e *EnqueueRequestForObject) Delete(evt event.DeleteEvent, q workqueue.Rate }}) } -// Generic implements EventHandler +// Generic implements EventHandler. func (e *EnqueueRequestForObject) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { if evt.Object == nil { enqueueLog.Error(nil, "GenericEvent received with no metadata", "event", evt) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go index 585c21e718..17401b1fdb 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go @@ -51,26 +51,26 @@ type enqueueRequestsFromMapFunc struct { toRequests MapFunc } -// Create implements EventHandler +// Create implements EventHandler. 
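`EnqueueRequestForObject` above now handles updates with one switch: enqueue `ObjectNew`, fall back to `ObjectOld`, otherwise log. Its typical wiring on a controller, sketched (the controller name and watched type are illustrative):

```go
package main

import (
	"context"
	"log"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		log.Fatal(err)
	}
	c, err := controller.New("pod-watcher", mgr, controller.Options{
		Reconciler: reconcile.Func(func(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
			return reconcile.Result{}, nil
		}),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Update events enqueue ObjectNew, falling back to ObjectOld,
	// exactly as the switch in the hunk above encodes.
	if err := c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForObject{}); err != nil {
		log.Fatal(err)
	}
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		log.Fatal(err)
	}
}
```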
func (e *enqueueRequestsFromMapFunc) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.mapAndEnqueue(q, evt.Object, reqs) } -// Update implements EventHandler +// Update implements EventHandler. func (e *enqueueRequestsFromMapFunc) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.mapAndEnqueue(q, evt.ObjectOld, reqs) e.mapAndEnqueue(q, evt.ObjectNew, reqs) } -// Delete implements EventHandler +// Delete implements EventHandler. func (e *enqueueRequestsFromMapFunc) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.mapAndEnqueue(q, evt.Object, reqs) } -// Generic implements EventHandler +// Generic implements EventHandler. func (e *enqueueRequestsFromMapFunc) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.mapAndEnqueue(q, evt.Object, reqs) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go index 8aa4ec52b2..63699893fc 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go @@ -57,7 +57,7 @@ type EnqueueRequestForOwner struct { mapper meta.RESTMapper } -// Create implements EventHandler +// Create implements EventHandler. func (e *EnqueueRequestForOwner) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.Object, reqs) @@ -66,7 +66,7 @@ func (e *EnqueueRequestForOwner) Create(evt event.CreateEvent, q workqueue.RateL } } -// Update implements EventHandler +// Update implements EventHandler. func (e *EnqueueRequestForOwner) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.ObjectOld, reqs) @@ -76,7 +76,7 @@ func (e *EnqueueRequestForOwner) Update(evt event.UpdateEvent, q workqueue.RateL } } -// Delete implements EventHandler +// Delete implements EventHandler. func (e *EnqueueRequestForOwner) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.Object, reqs) @@ -85,7 +85,7 @@ func (e *EnqueueRequestForOwner) Delete(evt event.DeleteEvent, q workqueue.RateL } } -// Generic implements EventHandler +// Generic implements EventHandler. func (e *EnqueueRequestForOwner) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.Object, reqs) @@ -105,10 +105,9 @@ func (e *EnqueueRequestForOwner) parseOwnerTypeGroupKind(scheme *runtime.Scheme) } // Expect only 1 kind. If there is more than one kind this is probably an edge case such as ListOptions. 
if len(kinds) != 1 { - err := fmt.Errorf("Expected exactly 1 kind for OwnerType %T, but found %s kinds", e.OwnerType, kinds) - log.Error(nil, "Expected exactly 1 kind for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType), "kinds", kinds) + err := fmt.Errorf("expected exactly 1 kind for OwnerType %T, but found %s kinds", e.OwnerType, kinds) + log.Error(nil, "expected exactly 1 kind for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType), "kinds", kinds) return err - } // Cache the Group and Kind for the OwnerType e.groupKind = schema.GroupKind{Group: kinds[0].Group, Kind: kinds[0].Kind} @@ -156,7 +155,7 @@ func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, // getOwnersReferences returns the OwnerReferences for an object as specified by the EnqueueRequestForOwner // - if IsController is true: only take the Controller OwnerReference (if found) -// - if IsController is false: take all OwnerReferences +// - if IsController is false: take all OwnerReferences. func (e *EnqueueRequestForOwner) getOwnersReferences(object metav1.Object) []metav1.OwnerReference { if object == nil { return nil diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go index c9b93f8b97..8652d22d72 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go @@ -75,28 +75,28 @@ type Funcs struct { GenericFunc func(event.GenericEvent, workqueue.RateLimitingInterface) } -// Create implements EventHandler +// Create implements EventHandler. func (h Funcs) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { if h.CreateFunc != nil { h.CreateFunc(e, q) } } -// Delete implements EventHandler +// Delete implements EventHandler. func (h Funcs) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) { if h.DeleteFunc != nil { h.DeleteFunc(e, q) } } -// Update implements EventHandler +// Update implements EventHandler. func (h Funcs) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { if h.UpdateFunc != nil { h.UpdateFunc(e, q) } } -// Generic implements EventHandler +// Generic implements EventHandler. func (h Funcs) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) { if h.GenericFunc != nil { h.GenericFunc(e, q) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/healthz.go b/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/healthz.go index e7f4b1c279..bd1cc151af 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/healthz.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/healthz.go @@ -35,7 +35,7 @@ type Handler struct { Checks map[string]Checker } -// checkStatus holds the output of a particular check +// checkStatus holds the output of a particular check. type checkStatus struct { name string healthy bool @@ -173,8 +173,7 @@ type CheckHandler struct { } func (h CheckHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { - err := h.Checker(req) - if err != nil { + if err := h.Checker(req); err != nil { http.Error(resp, fmt.Sprintf("internal server error: %v", err), http.StatusInternalServerError) } else { fmt.Fprint(resp, "ok") @@ -184,10 +183,10 @@ func (h CheckHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { // Checker knows how to perform a health check. type Checker func(req *http.Request) error -// Ping returns true automatically when checked +// Ping returns true automatically when checked. 
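`healthz.Ping` above is the stock always-healthy checker, and `getExcludedChecks` honors `?exclude=` query parameters. Registering it on a manager, sketched (the bind address is illustrative):

```go
package main

import (
	"log"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/healthz"
)

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		HealthProbeBindAddress: ":8081",
	})
	if err != nil {
		log.Fatal(err)
	}
	// Served under /healthz; GET /healthz?exclude=ping would skip it.
	if err := mgr.AddHealthzCheck("ping", healthz.Ping); err != nil {
		log.Fatal(err)
	}
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		log.Fatal(err)
	}
}
```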
var Ping Checker = func(_ *http.Request) error { return nil } -// getExcludedChecks extracts the health check names to be excluded from the query param +// getExcludedChecks extracts the health check names to be excluded from the query param. func getExcludedChecks(r *http.Request) sets.String { checks, found := r.URL.Query()["exclude"] if found { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go index f5024502d9..224d300b89 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go @@ -37,7 +37,7 @@ import ( var _ inject.Injector = &Controller{} -// Controller implements controller.Controller +// Controller implements controller.Controller. type Controller struct { // Name is used to uniquely identify a Controller in tracing, logging and monitoring. Name is required. Name string @@ -94,14 +94,14 @@ type watchDescription struct { predicates []predicate.Predicate } -// Reconcile implements reconcile.Reconciler +// Reconcile implements reconcile.Reconciler. func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { log := c.Log.WithValues("name", req.Name, "namespace", req.Namespace) ctx = logf.IntoContext(ctx, log) return c.Do.Reconcile(ctx, req) } -// Watch implements controller.Controller +// Watch implements controller.Controller. func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prct ...predicate.Predicate) error { c.mu.Lock() defer c.mu.Unlock() @@ -131,7 +131,7 @@ func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prc return src.Start(c.ctx, evthdler, c.Queue, prct...) } -// Start implements controller.Controller +// Start implements controller.Controller. func (c *Controller) Start(ctx context.Context) error { // use an IIFE to get proper lock handling // but lock outside to get proper handling of the queue shutdown @@ -295,13 +295,14 @@ func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) { // RunInformersAndControllers the syncHandler, passing it the Namespace/Name string of the // resource to be synced. - if result, err := c.Do.Reconcile(ctx, req); err != nil { + result, err := c.Do.Reconcile(ctx, req) + switch { + case err != nil: c.Queue.AddRateLimited(req) ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Inc() ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Inc() log.Error(err, "Reconciler error") - return - } else if result.RequeueAfter > 0 { + case result.RequeueAfter > 0: // The result.RequeueAfter request will be lost, if it is returned // along with a non-nil error. But this is intended as // We need to drive to stable reconcile loops before queuing due @@ -309,18 +310,15 @@ func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) { c.Queue.Forget(obj) c.Queue.AddAfter(req, result.RequeueAfter) ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeueAfter).Inc() - return - } else if result.Requeue { + case result.Requeue: c.Queue.AddRateLimited(req) ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeue).Inc() - return + default: + // Finally, if no error occurs we Forget this item so it does not + // get queued again until another change happens. 
+ c.Queue.Forget(obj) + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelSuccess).Inc() } - - // Finally, if no error occurs we Forget this item so it does not - // get queued again until another change happens. - c.Queue.Forget(obj) - - ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelSuccess).Inc() } // GetLogger returns this controller's logger. @@ -328,13 +326,13 @@ func (c *Controller) GetLogger() logr.Logger { return c.Log } -// InjectFunc implement SetFields.Injector +// InjectFunc implement SetFields.Injector. func (c *Controller) InjectFunc(f inject.Func) error { c.SetFields = f return nil } -// updateMetrics updates prometheus metrics within the controller +// updateMetrics updates prometheus metrics within the controller. func (c *Controller) updateMetrics(reconcileTime time.Duration) { ctrlmetrics.ReconcileTime.WithLabelValues(c.Name).Observe(reconcileTime.Seconds()) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go index 126ded6609..baec669277 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go @@ -18,6 +18,7 @@ package metrics import ( "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/collectors" "sigs.k8s.io/controller-runtime/pkg/metrics" ) @@ -25,21 +26,21 @@ var ( // ReconcileTotal is a prometheus counter metrics which holds the total // number of reconciliations per controller. It has two labels. controller label refers // to the controller name and result label refers to the reconcile result i.e - // success, error, requeue, requeue_after + // success, error, requeue, requeue_after. ReconcileTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ Name: "controller_runtime_reconcile_total", Help: "Total number of reconciliations per controller", }, []string{"controller", "result"}) // ReconcileErrors is a prometheus counter metrics which holds the total - // number of errors from the Reconciler + // number of errors from the Reconciler. ReconcileErrors = prometheus.NewCounterVec(prometheus.CounterOpts{ Name: "controller_runtime_reconcile_errors_total", Help: "Total number of reconciliation errors per controller", }, []string{"controller"}) // ReconcileTime is a prometheus metric which keeps track of the duration - // of reconciliations + // of reconciliations. ReconcileTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{ Name: "controller_runtime_reconcile_time_seconds", Help: "Length of time per reconciliation per controller", @@ -48,14 +49,14 @@ var ( }, []string{"controller"}) // WorkerCount is a prometheus metric which holds the number of - // concurrent reconciles per controller + // concurrent reconciles per controller. WorkerCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "controller_runtime_max_concurrent_reconciles", Help: "Maximum number of concurrent reconciles per controller", }, []string{"controller"}) // ActiveWorkers is a prometheus metric which holds the number - // of active workers per controller + // of active workers per controller. 
ActiveWorkers = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "controller_runtime_active_workers", Help: "Number of currently used workers per controller", @@ -70,8 +71,8 @@ func init() { WorkerCount, ActiveWorkers, // expose process metrics like CPU, Memory, file descriptor usage etc. - prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}), + collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}), // expose Go runtime metrics like GC stats, memory stats etc. - prometheus.NewGoCollector(), + collectors.NewGoCollector(), ) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go index 5264da3cc1..7057f3dbe4 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go @@ -20,7 +20,6 @@ import ( "errors" "fmt" - "k8s.io/apimachinery/pkg/api/meta" apimeta "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" @@ -28,7 +27,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -// FilterWithLabels returns a copy of the items in objs matching labelSel +// FilterWithLabels returns a copy of the items in objs matching labelSel. func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtime.Object, error) { outItems := make([]runtime.Object, 0, len(objs)) for _, obj := range objs { @@ -69,10 +68,10 @@ func IsAPINamespacedWithGVK(gk schema.GroupVersionKind, scheme *runtime.Scheme, scope := restmapping.Scope.Name() if scope == "" { - return false, errors.New("Scope cannot be identified. Empty scope returned") + return false, errors.New("scope cannot be identified, empty scope returned") } - if scope != meta.RESTScopeNameRoot { + if scope != apimeta.RESTScopeNameRoot { return true, nil } return false, nil diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go b/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go index 0173f6e2f4..55fd228690 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go @@ -31,7 +31,7 @@ import ( const inClusterNamespacePath = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" -// Options provides the required configuration to create a new resource lock +// Options provides the required configuration to create a new resource lock. type Options struct { // LeaderElection determines whether or not to use leader election when // starting the manager. @@ -104,8 +104,7 @@ func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, op func getInClusterNamespace() (string, error) { // Check whether the namespace file exists. // If not, we are not running in cluster so can't guess the namespace. 
- _, err := os.Stat(inClusterNamespacePath) - if os.IsNotExist(err) { + if _, err := os.Stat(inClusterNamespacePath); os.IsNotExist(err) { return "", fmt.Errorf("not running in-cluster, please specify LeaderElectionNamespace") } else if err != nil { return "", fmt.Errorf("error checking namespace file: %w", err) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go index ed18ae6d11..bbd9c9c756 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go @@ -47,7 +47,7 @@ func (p *loggerPromise) WithName(l *DelegatingLogger, name string) *loggerPromis return res } -// WithValues provides a new Logger with the tags appended +// WithValues provides a new Logger with the tags appended. func (p *loggerPromise) WithValues(l *DelegatingLogger, tags ...interface{}) *loggerPromise { res := &loggerPromise{ logger: l, @@ -74,7 +74,7 @@ func (p *loggerPromise) V(l *DelegatingLogger, level int) *loggerPromise { return res } -// Fulfill instantiates the Logger with the provided logger +// Fulfill instantiates the Logger with the provided logger. func (p *loggerPromise) Fulfill(parentLogger logr.Logger) { var logger = parentLogger if p.name != nil { @@ -163,7 +163,7 @@ func (l *DelegatingLogger) V(level int) logr.Logger { return res } -// WithName provides a new Logger with the name appended +// WithName provides a new Logger with the name appended. func (l *DelegatingLogger) WithName(name string) logr.Logger { l.lock.RLock() defer l.lock.RUnlock() @@ -179,7 +179,7 @@ func (l *DelegatingLogger) WithName(name string) logr.Logger { return res } -// WithValues provides a new Logger with the tags appended +// WithValues provides a new Logger with the tags appended. func (l *DelegatingLogger) WithValues(tags ...interface{}) logr.Logger { l.lock.RLock() defer l.lock.RUnlock() diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go index 4c56f3427b..09a5a02eb6 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go @@ -29,32 +29,32 @@ type NullLogger struct{} var _ logr.Logger = NullLogger{} -// Info implements logr.InfoLogger +// Info implements logr.InfoLogger. func (NullLogger) Info(_ string, _ ...interface{}) { // Do nothing. } -// Enabled implements logr.InfoLogger +// Enabled implements logr.InfoLogger. func (NullLogger) Enabled() bool { return false } -// Error implements logr.Logger +// Error implements logr.Logger. func (NullLogger) Error(_ error, _ string, _ ...interface{}) { // Do nothing. } -// V implements logr.Logger -func (log NullLogger) V(_ int) logr.InfoLogger { +// V implements logr.Logger. +func (log NullLogger) V(_ int) logr.Logger { return log } -// WithName implements logr.Logger +// WithName implements logr.Logger. func (log NullLogger) WithName(_ string) logr.Logger { return log } -// WithValues implements logr.Logger +// WithValues implements logr.Logger. 
func (log NullLogger) WithValues(_ ...interface{}) logr.Logger { return log } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go index d4ea12cebf..3012fdd411 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go @@ -23,7 +23,7 @@ import ( ) // KubeAPIWarningLoggerOptions controls the behavior -// of a rest.WarningHandler constructed using NewKubeAPIWarningLogger() +// of a rest.WarningHandler constructed using NewKubeAPIWarningLogger(). type KubeAPIWarningLoggerOptions struct { // Deduplicate indicates a given warning message should only be written once. // Setting this to true in a long-running process handling many warnings can diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go index c16a5bb5f3..5f85e10c90 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go @@ -29,7 +29,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" - utilerrors "k8s.io/apimachinery/pkg/util/errors" + kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/rest" "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" @@ -251,8 +251,7 @@ func (cm *controllerManager) AddMetricsExtraHandler(path string, handler http.Ha cm.mu.Lock() defer cm.mu.Unlock() - _, found := cm.metricsExtraHandlers[path] - if found { + if _, found := cm.metricsExtraHandlers[path]; found { return fmt.Errorf("can't register extra handler by duplicate path %q on metrics http server", path) } @@ -261,7 +260,7 @@ func (cm *controllerManager) AddMetricsExtraHandler(path string, handler http.Ha return nil } -// AddHealthzCheck allows you to add Healthz checker +// AddHealthzCheck allows you to add Healthz checker. func (cm *controllerManager) AddHealthzCheck(name string, check healthz.Checker) error { cm.mu.Lock() defer cm.mu.Unlock() @@ -282,7 +281,7 @@ func (cm *controllerManager) AddHealthzCheck(name string, check healthz.Checker) return nil } -// AddReadyzCheck allows you to add Readyz checker +// AddReadyzCheck allows you to add Readyz checker. func (cm *controllerManager) AddReadyzCheck(name string, check healthz.Checker) error { cm.mu.Lock() defer cm.mu.Unlock() @@ -451,7 +450,7 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) { // Utilerrors.Aggregate allows to use errors.Is for all contained errors // whereas fmt.Errorf allows wrapping at most one error which means the // other one can not be found anymore. - err = utilerrors.NewAggregate([]error{err, stopErr}) + err = kerrors.NewAggregate([]error{err, stopErr}) } else { err = stopErr } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go index 843919427d..903e3e47f9 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go @@ -95,7 +95,7 @@ type Manager interface { GetControllerOptions() v1alpha1.ControllerConfigurationSpec } -// Options are the arguments for creating a new Manager +// Options are the arguments for creating a new Manager. 
type Options struct { // Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources // Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better @@ -292,7 +292,7 @@ type Runnable interface { // until it's done running. type RunnableFunc func(context.Context) error -// Start implements Runnable +// Start implements Runnable. func (r RunnableFunc) Start(ctx context.Context) error { return r(ctx) } @@ -319,7 +319,7 @@ func New(config *rest.Config, options Options) (Manager, error) { clusterOptions.NewClient = options.NewClient clusterOptions.ClientDisableCacheFor = options.ClientDisableCacheFor clusterOptions.DryRunClient = options.DryRunClient - clusterOptions.EventBroadcaster = options.EventBroadcaster + clusterOptions.EventBroadcaster = options.EventBroadcaster //nolint:staticcheck }) if err != nil { return nil, err @@ -393,7 +393,7 @@ func New(config *rest.Config, options Options) (Manager, error) { // AndFrom will use a supplied type and convert to Options // any options already set on Options will be ignored, this is used to allow -// cli flags to override anything specified in the config file +// cli flags to override anything specified in the config file. func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, error) { if inj, wantsScheme := loader.(inject.Scheme); wantsScheme { err := inj.InjectScheme(o.Scheme) @@ -458,7 +458,7 @@ func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, return o, nil } -// AndFromOrDie will use options.AndFrom() and will panic if there are errors +// AndFromOrDie will use options.AndFrom() and will panic if there are errors. func (o Options) AndFromOrDie(loader config.ControllerManagerConfiguration) Options { o, err := o.AndFrom(loader) if err != nil { @@ -468,7 +468,7 @@ func (o Options) AndFromOrDie(loader config.ControllerManagerConfiguration) Opti } func (o Options) setLeaderElectionConfig(obj v1alpha1.ControllerManagerConfigurationSpec) Options { - if o.LeaderElection == false && obj.LeaderElection.LeaderElect != nil { + if !o.LeaderElection && obj.LeaderElection.LeaderElect != nil { o.LeaderElection = *obj.LeaderElection.LeaderElect } @@ -499,7 +499,7 @@ func (o Options) setLeaderElectionConfig(obj v1alpha1.ControllerManagerConfigura return o } -// defaultHealthProbeListener creates the default health probes listener bound to the given address +// defaultHealthProbeListener creates the default health probes listener bound to the given address. func defaultHealthProbeListener(addr string) (net.Listener, error) { if addr == "" || addr == "0" { return nil, nil @@ -512,9 +512,8 @@ func defaultHealthProbeListener(addr string) (net.Listener, error) { return ln, nil } -// setOptionsDefaults set default values for Options fields +// setOptionsDefaults set default values for Options fields. func setOptionsDefaults(options Options) Options { - // Allow newResourceLock to be mocked if options.newResourceLock == nil { options.newResourceLock = leaderelection.NewResourceLock diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go index 3df9b0b0b0..90754269dd 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go @@ -51,7 +51,7 @@ const ( ) var ( - // client metrics + // client metrics. 
requestLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ Subsystem: RestClientSubsystem, Name: LatencyKey, @@ -65,7 +65,7 @@ var ( Help: "Number of HTTP requests, partitioned by status code, method, and host.", }, []string{"code", "method", "host"}) - // reflector metrics + // reflector metrics. // TODO(directxman12): update these to be histograms once the metrics overhaul KEP // PRs start landing. @@ -124,7 +124,7 @@ func init() { registerReflectorMetrics() } -// registerClientMetrics sets up the client latency metrics from client-go +// registerClientMetrics sets up the client latency metrics from client-go. func registerClientMetrics() { // register the metrics with our registry Registry.MustRegister(requestLatency) @@ -137,7 +137,7 @@ func registerClientMetrics() { }) } -// registerReflectorMetrics sets up reflector (reconcile) loop metrics +// registerReflectorMetrics sets up reflector (reconcile) loop metrics. func registerReflectorMetrics() { Registry.MustRegister(listsTotal) Registry.MustRegister(listsDuration) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/registry.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/registry.go index 9ba3d600ea..ce17124d53 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/registry.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/registry.go @@ -26,5 +26,5 @@ type RegistererGatherer interface { } // Registry is a prometheus registry for storing metrics within the -// controller-runtime +// controller-runtime. var Registry RegistererGatherer = prometheus.NewRegistry() diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go index bab2ce346e..fc59d89ba3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go @@ -65,7 +65,7 @@ type Funcs struct { GenericFunc func(event.GenericEvent) bool } -// Create implements Predicate +// Create implements Predicate. func (p Funcs) Create(e event.CreateEvent) bool { if p.CreateFunc != nil { return p.CreateFunc(e) @@ -73,7 +73,7 @@ func (p Funcs) Create(e event.CreateEvent) bool { return true } -// Delete implements Predicate +// Delete implements Predicate. func (p Funcs) Delete(e event.DeleteEvent) bool { if p.DeleteFunc != nil { return p.DeleteFunc(e) @@ -81,7 +81,7 @@ func (p Funcs) Delete(e event.DeleteEvent) bool { return true } -// Update implements Predicate +// Update implements Predicate. func (p Funcs) Update(e event.UpdateEvent) bool { if p.UpdateFunc != nil { return p.UpdateFunc(e) @@ -89,7 +89,7 @@ func (p Funcs) Update(e event.UpdateEvent) bool { return true } -// Generic implements Predicate +// Generic implements Predicate. func (p Funcs) Generic(e event.GenericEvent) bool { if p.GenericFunc != nil { return p.GenericFunc(e) @@ -117,12 +117,12 @@ func NewPredicateFuncs(filter func(object client.Object) bool) Funcs { } } -// ResourceVersionChangedPredicate implements a default update predicate function on resource version change +// ResourceVersionChangedPredicate implements a default update predicate function on resource version change. type ResourceVersionChangedPredicate struct { Funcs } -// Update implements default UpdateEvent filter for validating resource version change +// Update implements default UpdateEvent filter for validating resource version change. 
func (ResourceVersionChangedPredicate) Update(e event.UpdateEvent) bool { if e.ObjectOld == nil { log.Error(nil, "Update event has no old object to update", "event", e) @@ -156,7 +156,7 @@ type GenerationChangedPredicate struct { Funcs } -// Update implements default UpdateEvent filter for validating generation change +// Update implements default UpdateEvent filter for validating generation change. func (GenerationChangedPredicate) Update(e event.UpdateEvent) bool { if e.ObjectOld == nil { log.Error(nil, "Update event has no old object to update", "event", e) @@ -186,7 +186,7 @@ type AnnotationChangedPredicate struct { Funcs } -// Update implements default UpdateEvent filter for validating annotation change +// Update implements default UpdateEvent filter for validating annotation change. func (AnnotationChangedPredicate) Update(e event.UpdateEvent) bool { if e.ObjectOld == nil { log.Error(nil, "Update event has no old object to update", "event", e) @@ -216,7 +216,7 @@ type LabelChangedPredicate struct { Funcs } -// Update implements default UpdateEvent filter for checking label change +// Update implements default UpdateEvent filter for checking label change. func (LabelChangedPredicate) Update(e event.UpdateEvent) bool { if e.ObjectOld == nil { log.Error(nil, "Update event has no old object to update", "event", e) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go index 8874c75727..c8c56ba817 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go @@ -29,7 +29,7 @@ import ( ) // Cache is used by the ControllerManager to inject Cache into Sources, EventHandlers, Predicates, and -// Reconciles +// Reconciles. type Cache interface { InjectCache(cache cache.Cache) error } @@ -49,7 +49,7 @@ type APIReader interface { } // APIReaderInto will set APIReader on i and return the result if it implements APIReaderInto. -// Returns false if i does not implement APIReader +// Returns false if i does not implement APIReader. func APIReaderInto(reader client.Reader, i interface{}) (bool, error) { if s, ok := i.(APIReader); ok { return true, s.InjectAPIReader(reader) @@ -58,7 +58,7 @@ func APIReaderInto(reader client.Reader, i interface{}) (bool, error) { } // Config is used by the ControllerManager to inject Config into Sources, EventHandlers, Predicates, and -// Reconciles +// Reconciles. type Config interface { InjectConfig(*rest.Config) error } @@ -73,7 +73,7 @@ func ConfigInto(config *rest.Config, i interface{}) (bool, error) { } // Client is used by the ControllerManager to inject client into Sources, EventHandlers, Predicates, and -// Reconciles +// Reconciles. type Client interface { InjectClient(client.Client) error } @@ -88,7 +88,7 @@ func ClientInto(client client.Client, i interface{}) (bool, error) { } // Scheme is used by the ControllerManager to inject Scheme into Sources, EventHandlers, Predicates, and -// Reconciles +// Reconciles. type Scheme interface { InjectScheme(scheme *runtime.Scheme) error } @@ -117,7 +117,7 @@ func StopChannelInto(stop <-chan struct{}, i interface{}) (bool, error) { return false, nil } -// Mapper is used to inject the rest mapper to components that may need it +// Mapper is used to inject the rest mapper to components that may need it. 
type Mapper interface { InjectMapper(meta.RESTMapper) error } @@ -134,7 +134,7 @@ func MapperInto(mapper meta.RESTMapper, i interface{}) (bool, error) { // Func injects dependencies into i. type Func func(i interface{}) error -// Injector is used by the ControllerManager to inject Func into Controllers +// Injector is used by the ControllerManager to inject Func into Controllers. type Injector interface { InjectFunc(f Func) error } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go b/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go index 33c4c41348..f0cfe212ed 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go @@ -33,14 +33,14 @@ var log = logf.RuntimeLog.WithName("source").WithName("EventHandler") var _ cache.ResourceEventHandler = EventHandler{} -// EventHandler adapts a handler.EventHandler interface to a cache.ResourceEventHandler interface +// EventHandler adapts a handler.EventHandler interface to a cache.ResourceEventHandler interface. type EventHandler struct { EventHandler handler.EventHandler Queue workqueue.RateLimitingInterface Predicates []predicate.Predicate } -// OnAdd creates CreateEvent and calls Create on EventHandler +// OnAdd creates CreateEvent and calls Create on EventHandler. func (e EventHandler) OnAdd(obj interface{}) { c := event.CreateEvent{} @@ -63,7 +63,7 @@ func (e EventHandler) OnAdd(obj interface{}) { e.EventHandler.Create(c, e.Queue) } -// OnUpdate creates UpdateEvent and calls Update on EventHandler +// OnUpdate creates UpdateEvent and calls Update on EventHandler. func (e EventHandler) OnUpdate(oldObj, newObj interface{}) { u := event.UpdateEvent{} @@ -94,7 +94,7 @@ func (e EventHandler) OnUpdate(oldObj, newObj interface{}) { e.EventHandler.Update(u, e.Queue) } -// OnDelete creates DeleteEvent and calls Delete on EventHandler +// OnDelete creates DeleteEvent and calls Delete on EventHandler. func (e EventHandler) OnDelete(obj interface{}) { d := event.DeleteEvent{} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go index adabbaf917..a63b37c443 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go @@ -66,7 +66,7 @@ type SyncingSource interface { // NewKindWithCache creates a Source without InjectCache, so that it is assured that the given cache is used // and not overwritten. It can be used to watch objects in a different cluster by passing the cache -// from that other cluster +// from that other cluster. func NewKindWithCache(object client.Object, cache cache.Cache) SyncingSource { return &kindWithCache{kind: Kind{Type: object, cache: cache}} } @@ -84,7 +84,7 @@ func (ks *kindWithCache) WaitForSync(ctx context.Context) error { return ks.kind.WaitForSync(ctx) } -// Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create) +// Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create). type Kind struct { // Type is the type of object to watch. e.g. &v1.Pod{} Type client.Object @@ -104,7 +104,6 @@ var _ SyncingSource = &Kind{} // to enqueue reconcile.Requests. 
func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, prct ...predicate.Predicate) error { - // Type should have been specified by the user. if ks.Type == nil { return fmt.Errorf("must specify Kind.Type") @@ -146,7 +145,7 @@ func (ks *Kind) String() string { if ks.Type != nil && ks.Type.GetObjectKind() != nil { return fmt.Sprintf("kind source: %v", ks.Type.GetObjectKind().GroupVersionKind().String()) } - return fmt.Sprintf("kind source: unknown GVK") + return "kind source: unknown GVK" } // WaitForSync implements SyncingSource to allow controllers to wait with starting @@ -307,7 +306,7 @@ func (cs *Channel) syncLoop(ctx context.Context) { } } -// Informer is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create) +// Informer is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create). type Informer struct { // Informer is the controller-runtime Informer Informer cache.Informer @@ -319,7 +318,6 @@ var _ Source = &Informer{} // to enqueue reconcile.Requests. func (is *Informer) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, prct ...predicate.Predicate) error { - // Informer should have been specified by the user. if is.Informer == nil { return fmt.Errorf("must specify Informer.Informer") @@ -335,10 +333,10 @@ func (is *Informer) String() string { var _ Source = Func(nil) -// Func is a function that implements Source +// Func is a function that implements Source. type Func func(context.Context, handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error -// Start implements Source +// Start implements Source. func (f Func) Start(ctx context.Context, evt handler.EventHandler, queue workqueue.RateLimitingInterface, pr ...predicate.Predicate) error { return f(ctx, evt, queue, pr...) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go index 9583b5e9ac..c7cb71b755 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go @@ -31,7 +31,7 @@ type Decoder struct { codecs serializer.CodecFactory } -// NewDecoder creates a Decoder given the runtime.Scheme +// NewDecoder creates a Decoder given the runtime.Scheme. 
func NewDecoder(scheme *runtime.Scheme) (*Decoder, error) { return &Decoder{codecs: serializer.NewCodecFactory(scheme)}, nil } @@ -64,11 +64,7 @@ func (d *Decoder) DecodeRaw(rawObj runtime.RawExtension, into runtime.Object) er } if unstructuredInto, isUnstructured := into.(*unstructured.Unstructured); isUnstructured { // unmarshal into unstructured's underlying object to avoid calling the decoder - if err := json.Unmarshal(rawObj.Raw, &unstructuredInto.Object); err != nil { - return err - } - - return nil + return json.Unmarshal(rawObj.Raw, &unstructuredInto.Object) } deserializer := d.codecs.UniversalDeserializer() diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go index 8b255894ba..0d9aa7a838 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) -// Defaulter defines functions for setting defaults on resources +// Defaulter defines functions for setting defaults on resources. type Defaulter interface { runtime.Object Default() @@ -58,8 +58,7 @@ func (h *mutatingHandler) Handle(ctx context.Context, req Request) Response { // Get the object in the request obj := h.defaulter.DeepCopyObject().(Defaulter) - err := h.decoder.Decode(req, obj) - if err != nil { + if err := h.decoder.Decode(req, obj); err != nil { return Errored(http.StatusBadRequest, err) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go index 052f803161..3fa8872ff2 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go @@ -68,8 +68,7 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { } // verify the content type is accurate - contentType := r.Header.Get("Content-Type") - if contentType != "application/json" { + if contentType := r.Header.Get("Content-Type"); contentType != "application/json" { err = fmt.Errorf("contentType=%s, expected application/json", contentType) wh.log.Error(err, "unable to process a request with an unknown content type", "content type", contentType) reviewResponse = Errored(http.StatusBadRequest, err) @@ -125,8 +124,7 @@ func (wh *Webhook) writeResponseTyped(w io.Writer, response Response, admRevGVK // writeAdmissionResponse writes ar to w. 
func (wh *Webhook) writeAdmissionResponse(w io.Writer, ar v1.AdmissionReview) { - err := json.NewEncoder(w).Encode(ar) - if err != nil { + if err := json.NewEncoder(w).Encode(ar); err != nil { wh.log.Error(err, "unable to encode the response") wh.writeResponse(w, Errored(http.StatusInternalServerError, err)) } else { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go index 926d4a5bd1..4b27e75ede 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go @@ -22,11 +22,11 @@ import ( "net/http" v1 "k8s.io/api/admission/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" ) -// Validator defines functions for validating an operation +// Validator defines functions for validating an operation. type Validator interface { runtime.Object ValidateCreate() error @@ -70,7 +70,7 @@ func (h *validatingHandler) Handle(ctx context.Context, req Request) Response { err = obj.ValidateCreate() if err != nil { - var apiStatus errors.APIStatus + var apiStatus apierrors.APIStatus if goerrors.As(err, &apiStatus) { return validationResponseFromStatus(false, apiStatus.Status()) } @@ -92,7 +92,7 @@ func (h *validatingHandler) Handle(ctx context.Context, req Request) Response { err = obj.ValidateUpdate(oldObj) if err != nil { - var apiStatus errors.APIStatus + var apiStatus apierrors.APIStatus if goerrors.As(err, &apiStatus) { return validationResponseFromStatus(false, apiStatus.Status()) } @@ -110,7 +110,7 @@ func (h *validatingHandler) Handle(ctx context.Context, req Request) Response { err = obj.ValidateDelete() if err != nil { - var apiStatus errors.APIStatus + var apiStatus apierrors.APIStatus if goerrors.As(err, &apiStatus) { return validationResponseFromStatus(false, apiStatus.Status()) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go index d8c7721501..cf7dbcf68d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go @@ -133,8 +133,8 @@ type Webhook struct { } // InjectLogger gets a handle to a logging instance, hopefully with more info about this particular webhook. -func (w *Webhook) InjectLogger(l logr.Logger) error { - w.log = l +func (wh *Webhook) InjectLogger(l logr.Logger) error { + wh.log = l return nil } @@ -142,10 +142,10 @@ func (w *Webhook) InjectLogger(l logr.Logger) error { // If the webhook is mutating type, it delegates the AdmissionRequest to each handler and merge the patches. // If the webhook is validating type, it delegates the AdmissionRequest to each handler and // deny the request if anyone denies. 
-func (w *Webhook) Handle(ctx context.Context, req Request) Response { - resp := w.Handler.Handle(ctx, req) +func (wh *Webhook) Handle(ctx context.Context, req Request) Response { + resp := wh.Handler.Handle(ctx, req) if err := resp.Complete(req); err != nil { - w.log.Error(err, "unable to encode response") + wh.log.Error(err, "unable to encode response") return Errored(http.StatusInternalServerError, errUnableToEncodeResponse) } @@ -153,19 +153,19 @@ func (w *Webhook) Handle(ctx context.Context, req Request) Response { } // InjectScheme injects a scheme into the webhook, in order to construct a Decoder. -func (w *Webhook) InjectScheme(s *runtime.Scheme) error { +func (wh *Webhook) InjectScheme(s *runtime.Scheme) error { // TODO(directxman12): we should have a better way to pass this down var err error - w.decoder, err = NewDecoder(s) + wh.decoder, err = NewDecoder(s) if err != nil { return err } // inject the decoder here too, just in case the order of calling this is not // scheme first, then inject func - if w.Handler != nil { - if _, err := InjectDecoderInto(w.GetDecoder(), w.Handler); err != nil { + if wh.Handler != nil { + if _, err := InjectDecoderInto(wh.GetDecoder(), wh.Handler); err != nil { return err } } @@ -175,12 +175,12 @@ func (w *Webhook) InjectScheme(s *runtime.Scheme) error { // GetDecoder returns a decoder to decode the objects embedded in admission requests. // It may be nil if we haven't received a scheme to use to determine object types yet. -func (w *Webhook) GetDecoder() *Decoder { - return w.decoder +func (wh *Webhook) GetDecoder() *Decoder { + return wh.decoder } // InjectFunc injects the field setter into the webhook. -func (w *Webhook) InjectFunc(f inject.Func) error { +func (wh *Webhook) InjectFunc(f inject.Func) error { // inject directly into the handlers. It would be more correct // to do this in a sync.Once in Handle (since we don't have some // other start/finalize-type method), but it's more efficient to @@ -200,14 +200,14 @@ func (w *Webhook) InjectFunc(f inject.Func) error { return err } - if _, err := InjectDecoderInto(w.GetDecoder(), target); err != nil { + if _, err := InjectDecoderInto(wh.GetDecoder(), target); err != nil { return err } return nil } - return setFields(w.Handler) + return setFields(wh.Handler) } // StandaloneOptions let you configure a StandaloneWebhook. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go index 276784efb2..1a831016af 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go @@ -23,10 +23,10 @@ import ( // define some aliases for common bits of the webhook functionality -// Defaulter defines functions for setting defaults on resources +// Defaulter defines functions for setting defaults on resources. type Defaulter = admission.Defaulter -// Validator defines functions for validating an operation +// Validator defines functions for validating an operation. type Validator = admission.Validator // AdmissionRequest defines the input for an admission handler. 
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go index cdd34c9660..d2338d0b77 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go @@ -28,10 +28,12 @@ import ( "path/filepath" "strconv" "sync" + "time" "k8s.io/apimachinery/pkg/runtime" kscheme "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/certwatcher" + "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics" ) @@ -70,6 +72,10 @@ type Server struct { // Defaults to "", which means server does not verify client's certificate. ClientCAName string + // TLSVersion is the minimum version of TLS supported. Accepts + // "", "1.0", "1.1", "1.2" and "1.3" only ("" is equivalent to "1.0" for backwards compatibility) + TLSMinVersion string + // WebhookMux is the multiplexer that handles different webhooks. WebhookMux *http.ServeMux @@ -83,6 +89,10 @@ type Server struct { // defaultingOnce ensures that the default fields are only ever set once. defaultingOnce sync.Once + // started is set to true immediately before the server is started + // and thus can be used to check if the server has been started + started bool + // mu protects access to the webhook map & setFields for Start, Register, etc mu sync.Mutex } @@ -124,8 +134,7 @@ func (s *Server) Register(path string, hook http.Handler) { defer s.mu.Unlock() s.defaultingOnce.Do(s.setDefaults) - _, found := s.webhooks[path] - if found { + if _, found := s.webhooks[path]; found { panic(fmt.Errorf("can't register duplicate path: %v", path)) } // TODO(directxman12): call setfields if we've already started the server @@ -175,6 +184,26 @@ func (s *Server) StartStandalone(ctx context.Context, scheme *runtime.Scheme) er return s.Start(ctx) } +// tlsVersion converts from human-readable TLS version (for example "1.1") +// to the values accepted by tls.Config (for example 0x301). +func tlsVersion(version string) (uint16, error) { + switch version { + // default is previous behaviour + case "": + return tls.VersionTLS10, nil + case "1.0": + return tls.VersionTLS10, nil + case "1.1": + return tls.VersionTLS11, nil + case "1.2": + return tls.VersionTLS12, nil + case "1.3": + return tls.VersionTLS13, nil + default: + return 0, fmt.Errorf("invalid TLSMinVersion %v: expects 1.0, 1.1, 1.2, 1.3 or empty", version) + } +} + // Start runs the server. // It will install the webhook related resources depend on the server configuration. 
func (s *Server) Start(ctx context.Context) error { @@ -197,9 +226,15 @@ func (s *Server) Start(ctx context.Context) error { } }() - cfg := &tls.Config{ + tlsMinVersion, err := tlsVersion(s.TLSMinVersion) + if err != nil { + return err + } + + cfg := &tls.Config{ //nolint:gosec NextProtos: []string{"h2"}, GetCertificate: certWatcher.GetCertificate, + MinVersion: tlsMinVersion, } // load CA to verify client certificate @@ -219,7 +254,7 @@ func (s *Server) Start(ctx context.Context) error { cfg.ClientAuth = tls.RequireAndVerifyClientCert } - listener, err := tls.Listen("tcp", net.JoinHostPort(s.Host, strconv.Itoa(int(s.Port))), cfg) + listener, err := tls.Listen("tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), cfg) if err != nil { return err } @@ -243,6 +278,9 @@ func (s *Server) Start(ctx context.Context) error { close(idleConnsClosed) }() + s.mu.Lock() + s.started = true + s.mu.Unlock() if err := srv.Serve(listener); err != nil && err != http.ErrServerClosed { return err } @@ -251,6 +289,34 @@ func (s *Server) Start(ctx context.Context) error { return nil } +// StartedChecker returns an healthz.Checker which is healthy after the +// server has been started. +func (s *Server) StartedChecker() healthz.Checker { + config := &tls.Config{ + InsecureSkipVerify: true, // nolint:gosec // config is used to connect to our own webhook port. + } + return func(req *http.Request) error { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.started { + return fmt.Errorf("webhook server has not been started yet") + } + + d := &net.Dialer{Timeout: 10 * time.Second} + conn, err := tls.DialWithDialer(d, "tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), config) + if err != nil { + return fmt.Errorf("webhook server is not reachable: %v", err) + } + + if err := conn.Close(); err != nil { + return fmt.Errorf("webhook server is not reachable: closing connection: %v", err) + } + + return nil + } +} + // InjectFunc injects the field setter into the server. func (s *Server) InjectFunc(f inject.Func) error { s.setFields = f From 9fe387d9afcd8a921a16a4ec997431ee3341cc2b Mon Sep 17 00:00:00 2001 From: Aditya Dani Date: Wed, 5 Oct 2022 22:34:53 -0700 Subject: [PATCH 34/97] PWX-27273: Only set the ResourceMigrationFinishTs after purging the resources. - This change partially reverts - https://github.com/libopenstorage/stork/pull/1159 - Instead of setting the migration status after purging the resources, it sets it before so that a resource migration status of "Purged" does not affect the overall migration status. - The ResourceMigrationFinishTimestamp is still set after purging is done. 
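- In short, the reordered flow in migrateResources is (a condensed sketch
  taken from the diff below; all names match the diff, error handling and
  the CR update are elided):

      // Finalize the overall migration status first, so that later
      // marking resources as "Purged" cannot downgrade a successful
      // migration to PartialSuccess.
      migration.Status.Stage = stork_api.MigrationStageFinal
      migration.Status.Status = stork_api.MigrationStatusSuccessful
      for _, resource := range migration.Status.Resources {
          if resource.Status != stork_api.MigrationStatusSuccessful {
              migration.Status.Status = stork_api.MigrationStatusPartialSuccess
              break
          }
      }
      if *migration.Spec.PurgeDeletedResources {
          // purging runs only after the status has been finalized
      }
      // The finish timestamps are still recorded only once purging is done.
      migration.Status.ResourceMigrationFinishTimestamp = metav1.Now()
      migration.Status.FinishTimestamp = metav1.Now()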
---
 pkg/migration/controllers/migration.go | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/pkg/migration/controllers/migration.go b/pkg/migration/controllers/migration.go
index c181c1915d..b823d973e7 100644
--- a/pkg/migration/controllers/migration.go
+++ b/pkg/migration/controllers/migration.go
@@ -1006,6 +1006,15 @@ func (m *MigrationController) migrateResources(migration *stork_api.Migration, v
 		return err
 	}
 
+	migration.Status.Stage = stork_api.MigrationStageFinal
+	migration.Status.Status = stork_api.MigrationStatusSuccessful
+	for _, resource := range migration.Status.Resources {
+		if resource.Status != stork_api.MigrationStatusSuccessful {
+			migration.Status.Status = stork_api.MigrationStatusPartialSuccess
+			break
+		}
+	}
+
 	if *migration.Spec.PurgeDeletedResources {
 		if err := m.purgeMigratedResources(migration, resourceCollectorOpts); err != nil {
 			message := fmt.Sprintf("Error cleaning up resources: %v", err)
@@ -1019,15 +1028,6 @@
 	}
 
 	migration.Status.ResourceMigrationFinishTimestamp = metav1.Now()
-	migration.Status.Stage = stork_api.MigrationStageFinal
-	migration.Status.Status = stork_api.MigrationStatusSuccessful
-	for _, resource := range migration.Status.Resources {
-		if resource.Status != stork_api.MigrationStatusSuccessful {
-			migration.Status.Status = stork_api.MigrationStatusPartialSuccess
-			break
-		}
-	}
-
 	migration.Status.FinishTimestamp = metav1.Now()
 	err = m.updateMigrationCR(context.TODO(), migration)
 	if err != nil {

From 54803434e76d841218e0c06f64299d4cc4062609 Mon Sep 17 00:00:00 2001
From: Ram
Date: Tue, 20 Sep 2022 18:53:49 +0530
Subject: [PATCH 35/97] Set QPS and BURST rate for sched-ops client

Signed-off-by: Ram
---
 .travis.yml | 2 +-
 cmd/stork/stork.go | 11 +++++++++--
 pkg/migration/controllers/migration.go | 3 +++
 3 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 029a4b8d8c..cd89ca2a0e 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,7 +4,7 @@ language: go
 go:
   - 1.17.3
 before_install:
-  - sudo apt-get update -yq
+  - sudo apt-get update -yq || true
  - sudo apt-get install go-md2man -y
   - sudo apt-get install -y awscli
 cache:
diff --git a/cmd/stork/stork.go b/cmd/stork/stork.go
index 03972c19ca..fa41ad5b5b 100644
--- a/cmd/stork/stork.go
+++ b/cmd/stork/stork.go
@@ -45,6 +45,7 @@ import (
 	"github.com/portworx/kdmp/pkg/drivers"
 	"github.com/portworx/kdmp/pkg/jobratelimit"
 	kdmpversion "github.com/portworx/kdmp/pkg/version"
+	"github.com/portworx/sched-ops/k8s/common"
 	schedops "github.com/portworx/sched-ops/k8s/core"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	log "github.com/sirupsen/logrus"
@@ -185,12 +186,12 @@ func main() {
 		},
 		cli.IntFlag{
 			Name: "k8s-api-qps",
-			Value: 100,
+			Value: 1000,
 			Usage: "Restrict number of k8s api requests from stork (default: 100 QPS)",
 		},
 		cli.IntFlag{
 			Name: "k8s-api-burst",
-			Value: 100,
+			Value: 2000,
 			Usage: "Restrict number of k8s api requests from stork (default: 100 Burst)",
 		},
 		cli.BoolTFlag{
@@ -420,6 +421,12 @@ func runStork(mgr manager.Manager, d volume.Driver, recorder record.EventRecorde
 	if err := resourceCollector.Init(nil); err != nil {
 		log.Fatalf("Error initializing ResourceCollector: %v", err)
 	}
+	if err := os.Setenv(common.BurstRate, strconv.Itoa(burst)); err != nil {
+		log.Fatalf("Error setting Burst Rate: %v", err)
+	}
+	if err := os.Setenv(common.QPSRate, strconv.Itoa(qps)); err != nil {
+		log.Fatalf("Error setting QPS Rate: %v", err)
+	}
adminNamespace := c.String("admin-namespace") if adminNamespace == "" { adminNamespace = c.String("migration-admin-namespace") diff --git a/pkg/migration/controllers/migration.go b/pkg/migration/controllers/migration.go index b823d973e7..f2f2f309ab 100644 --- a/pkg/migration/controllers/migration.go +++ b/pkg/migration/controllers/migration.go @@ -804,6 +804,7 @@ func (m *MigrationController) migrateVolumes(migration *stork_api.Migration, ter return err } } else { + logrus.Infof("Migrating pv and pvcs for volume only migration") err := m.migrateResources(migration, true) if err != nil { log.MigrationLog(migration).Errorf("Error migrating resources: %v", err) @@ -1660,6 +1661,7 @@ func (m *MigrationController) applyResources( updatedObjects = append(updatedObjects, o) } } + logrus.Infof("Recreating pv and pvc object") // create/update pv object with updated policy for _, obj := range pvObjects { var pv v1.PersistentVolume @@ -2224,6 +2226,7 @@ func (m *MigrationController) getVolumeOnlyMigrationResources( return resources, err } resources = append(resources, objects.Items...) + // add pvcs to resource list resource = metav1.APIResource{ Name: "persistentvolumeclaims", From 65e65f96c3e6e74622c3f2e855712da4928b1671 Mon Sep 17 00:00:00 2001 From: Ram Date: Mon, 10 Oct 2022 20:41:04 +0530 Subject: [PATCH 36/97] vendor updates sched-ops Signed-off-by: Ram --- go.mod | 3 +- go.sum | 2 ++ .../admissionregistration.go | 6 +++- .../k8s/apiextensions/apiextensions.go | 6 +++- .../portworx/sched-ops/k8s/apps/apps.go | 6 +++- .../sched-ops/k8s/autopilot/autopilot.go | 6 +++- .../portworx/sched-ops/k8s/batch/batch.go | 6 +++- .../portworx/sched-ops/k8s/common/utils.go | 32 +++++++++++++++++++ .../portworx/sched-ops/k8s/core/core.go | 6 +++- .../portworx/sched-ops/k8s/dynamic/dynamic.go | 6 +++- .../externalsnapshotter.go | 6 +++- .../k8s/externalstorage/externalstorage.go | 6 +++- .../portworx/sched-ops/k8s/kdmp/kdmp.go | 6 +++- .../sched-ops/k8s/networking/networking.go | 6 +++- .../sched-ops/k8s/openshift/openshift.go | 6 +++- .../sched-ops/k8s/operator/operator.go | 6 +++- .../portworx/sched-ops/k8s/policy/policy.go | 6 +++- .../sched-ops/k8s/prometheus/prometheus.go | 6 +++- .../portworx/sched-ops/k8s/rbac/rbac.go | 6 +++- .../portworx/sched-ops/k8s/storage/storage.go | 5 +++ .../portworx/sched-ops/k8s/stork/stork.go | 6 +++- vendor/modules.txt | 4 +-- 22 files changed, 128 insertions(+), 20 deletions(-) create mode 100644 vendor/github.com/portworx/sched-ops/k8s/common/utils.go diff --git a/go.mod b/go.mod index 534425a34a..f31faa0219 100644 --- a/go.mod +++ b/go.mod @@ -77,7 +77,8 @@ replace ( github.com/kubernetes-incubator/external-storage v0.20.4-openstorage-rc7 => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc7 github.com/libopenstorage/autopilot-api => github.com/libopenstorage/autopilot-api v0.6.1-0.20210301232050-ca2633c6e114 github.com/libopenstorage/openstorage => github.com/libopenstorage/openstorage v1.0.1-0.20220707215604-afbea03c04c5 - github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20220824221759-f21d3c3b4496 + //github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20220824221759-f21d3c3b4496 + github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20220922150029-c1d35df2436a github.com/portworx/torpedo => github.com/portworx/torpedo v0.0.0-20220714042817-25f6ab6dc5d1 gopkg.in/fsnotify.v1 v1.4.7 => github.com/fsnotify/fsnotify v1.4.7 helm.sh/helm/v3 => helm.sh/helm/v3 v3.6.1 diff --git 
a/go.sum b/go.sum index d99c6b91ae..98f42c56de 100644 --- a/go.sum +++ b/go.sum @@ -1434,6 +1434,8 @@ github.com/portworx/sched-ops v1.20.4-rc1.0.20220714042759-8f183fe386ca h1:jrjwi github.com/portworx/sched-ops v1.20.4-rc1.0.20220714042759-8f183fe386ca/go.mod h1:0IQvado0rnmbRMORaCqCDrrzjBrX5sU+Sz2+vQwEsjM= github.com/portworx/sched-ops v1.20.4-rc1.0.20220824221759-f21d3c3b4496 h1:4VuOzgXy6EU6zrVTEP4wlAaBUwdGA2jY1ckyjthTvb8= github.com/portworx/sched-ops v1.20.4-rc1.0.20220824221759-f21d3c3b4496/go.mod h1:/xDBMzUV30kbdQYaPdAFcAYqEada6ZnWi4zt4KzFzAI= +github.com/portworx/sched-ops v1.20.4-rc1.0.20220922150029-c1d35df2436a h1:qzoPM67cqkX6qJKzd1Wmbt9hZkY5kFYlqnbZMfG8qU0= +github.com/portworx/sched-ops v1.20.4-rc1.0.20220922150029-c1d35df2436a/go.mod h1:/xDBMzUV30kbdQYaPdAFcAYqEada6ZnWi4zt4KzFzAI= github.com/portworx/talisman v0.0.0-20210302012732-8af4564777f7/go.mod h1:e8a6uFpSbOlRpZQlW9aXYogC+GWAo065G0RL9hDkD4Q= github.com/portworx/torpedo v0.0.0-20220714042817-25f6ab6dc5d1 h1:P4Lo6jDUUKglz7rkqlK8Hg4gLXqIIrgQaEeWxcXrV8U= github.com/portworx/torpedo v0.0.0-20220714042817-25f6ab6dc5d1/go.mod h1:I2wJjwLvCub+L1eNHWyHIIe6SrCreMVgwym4dCsR1WE= diff --git a/vendor/github.com/portworx/sched-ops/k8s/admissionregistration/admissionregistration.go b/vendor/github.com/portworx/sched-ops/k8s/admissionregistration/admissionregistration.go index 6478ea09c5..edaf7338b6 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/admissionregistration/admissionregistration.go +++ b/vendor/github.com/portworx/sched-ops/k8s/admissionregistration/admissionregistration.go @@ -5,6 +5,7 @@ import ( "os" "sync" + "github.com/portworx/sched-ops/k8s/common" apiadmissionsclientv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" apiadmissionsclient "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" @@ -146,7 +147,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.admission, err = apiadmissionsclient.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/apiextensions/apiextensions.go b/vendor/github.com/portworx/sched-ops/k8s/apiextensions/apiextensions.go index 3704406441..b26a6f29c0 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/apiextensions/apiextensions.go +++ b/vendor/github.com/portworx/sched-ops/k8s/apiextensions/apiextensions.go @@ -5,6 +5,7 @@ import ( "os" "sync" + "github.com/portworx/sched-ops/k8s/common" apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/rest" @@ -139,7 +140,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.extension, err = apiextensionsclient.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/apps/apps.go b/vendor/github.com/portworx/sched-ops/k8s/apps/apps.go index 33bfedb0c4..8d09ad38a4 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/apps/apps.go +++ b/vendor/github.com/portworx/sched-ops/k8s/apps/apps.go @@ -5,6 +5,7 @@ import ( "os" "sync" + "github.com/portworx/sched-ops/k8s/common" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" @@ -153,7 +154,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return 
err + } c.apps, err = appsv1client.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/autopilot/autopilot.go b/vendor/github.com/portworx/sched-ops/k8s/autopilot/autopilot.go index ef843b3e8d..a5e60ef919 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/autopilot/autopilot.go +++ b/vendor/github.com/portworx/sched-ops/k8s/autopilot/autopilot.go @@ -6,6 +6,7 @@ import ( "sync" autopilotclientset "github.com/libopenstorage/autopilot-api/pkg/client/clientset/versioned" + "github.com/portworx/sched-ops/k8s/common" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" ) @@ -136,7 +137,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.autopilot, err = autopilotclientset.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/batch/batch.go b/vendor/github.com/portworx/sched-ops/k8s/batch/batch.go index a0fc2490ef..0c90ce452c 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/batch/batch.go +++ b/vendor/github.com/portworx/sched-ops/k8s/batch/batch.go @@ -5,6 +5,7 @@ import ( "os" "sync" + "github.com/portworx/sched-ops/k8s/common" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" batchv1client "k8s.io/client-go/kubernetes/typed/batch/v1" batchv1beta1client "k8s.io/client-go/kubernetes/typed/batch/v1beta1" @@ -144,7 +145,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.batch, err = batchv1client.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/common/utils.go b/vendor/github.com/portworx/sched-ops/k8s/common/utils.go new file mode 100644 index 0000000000..9f56023ba2 --- /dev/null +++ b/vendor/github.com/portworx/sched-ops/k8s/common/utils.go @@ -0,0 +1,32 @@ +package common + +import ( + "fmt" + "os" + "strconv" + + "k8s.io/client-go/rest" +) + +const ( + QPSRate = "KUBERNETES_OPS_QPS_RATE" + BurstRate = "KUBERNETES_OPS_BURST_RATE" +) + +func SetRateLimiter(config *rest.Config) error { + if val := os.Getenv(QPSRate); val != "" { + qps, err := strconv.Atoi(val) + if err != nil { + return fmt.Errorf("invalid qps count specified %v: %v", val, err) + } + config.QPS = float32(qps) + } + if val := os.Getenv(BurstRate); val != "" { + burst, err := strconv.Atoi(val) + if err != nil { + return fmt.Errorf("invalid burst count specified %v: %v", val, err) + } + config.Burst = int(burst) + } + return nil +} diff --git a/vendor/github.com/portworx/sched-ops/k8s/core/core.go b/vendor/github.com/portworx/sched-ops/k8s/core/core.go index 72c9cb220d..f8472aa237 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/core/core.go +++ b/vendor/github.com/portworx/sched-ops/k8s/core/core.go @@ -6,6 +6,7 @@ import ( "sync" "time" + "github.com/portworx/sched-ops/k8s/common" "github.com/portworx/sched-ops/task" "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" @@ -204,7 +205,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.kubernetes, err = kubernetes.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/dynamic/dynamic.go b/vendor/github.com/portworx/sched-ops/k8s/dynamic/dynamic.go index 5605f75940..c958635f42 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/dynamic/dynamic.go +++ 
b/vendor/github.com/portworx/sched-ops/k8s/dynamic/dynamic.go @@ -7,6 +7,7 @@ import ( "strings" "sync" + "github.com/portworx/sched-ops/k8s/common" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -223,7 +224,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.client, err = dynamic.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/externalsnapshotter/externalsnapshotter.go b/vendor/github.com/portworx/sched-ops/k8s/externalsnapshotter/externalsnapshotter.go index b4b13c5bd0..81a0ce8bb1 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/externalsnapshotter/externalsnapshotter.go +++ b/vendor/github.com/portworx/sched-ops/k8s/externalsnapshotter/externalsnapshotter.go @@ -6,6 +6,7 @@ import ( "sync" "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1" + "github.com/portworx/sched-ops/k8s/common" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" ) @@ -138,7 +139,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.client, err = v1beta1.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/externalstorage/externalstorage.go b/vendor/github.com/portworx/sched-ops/k8s/externalstorage/externalstorage.go index d038ce200c..4d4a232114 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/externalstorage/externalstorage.go +++ b/vendor/github.com/portworx/sched-ops/k8s/externalstorage/externalstorage.go @@ -6,6 +6,7 @@ import ( "sync" snapclient "github.com/kubernetes-incubator/external-storage/snapshot/pkg/client" + "github.com/portworx/sched-ops/k8s/common" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" ) @@ -135,7 +136,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.snap, _, err = snapclient.NewClient(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/kdmp/kdmp.go b/vendor/github.com/portworx/sched-ops/k8s/kdmp/kdmp.go index 449c66372c..b1a64268c2 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/kdmp/kdmp.go +++ b/vendor/github.com/portworx/sched-ops/k8s/kdmp/kdmp.go @@ -6,6 +6,7 @@ import ( "sync" kdmpclientset "github.com/portworx/kdmp/pkg/client/clientset/versioned" + "github.com/portworx/sched-ops/k8s/common" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -152,7 +153,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.kube, err = kubernetes.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/networking/networking.go b/vendor/github.com/portworx/sched-ops/k8s/networking/networking.go index 3cdb6205e2..0a40740bb1 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/networking/networking.go +++ b/vendor/github.com/portworx/sched-ops/k8s/networking/networking.go @@ -5,6 +5,7 @@ import ( "os" "sync" + "github.com/portworx/sched-ops/k8s/common" networkingv1betaclient "k8s.io/client-go/kubernetes/typed/networking/v1beta1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -135,7 +136,10 @@ func (c *Client) loadClient() error { } 
var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.networking, err = networkingv1betaclient.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/openshift/openshift.go b/vendor/github.com/portworx/sched-ops/k8s/openshift/openshift.go index 8aeb1a561e..7c165a78b3 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/openshift/openshift.go +++ b/vendor/github.com/portworx/sched-ops/k8s/openshift/openshift.go @@ -8,6 +8,7 @@ import ( ocpclientset "github.com/openshift/client-go/apps/clientset/versioned" ocpconfigclientset "github.com/openshift/client-go/config/clientset/versioned" ocpsecurityclientset "github.com/openshift/client-go/security/clientset/versioned" + "github.com/portworx/sched-ops/k8s/common" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -172,7 +173,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.kube, err = kubernetes.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/operator/operator.go b/vendor/github.com/portworx/sched-ops/k8s/operator/operator.go index 5106e1ef01..84704a1560 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/operator/operator.go +++ b/vendor/github.com/portworx/sched-ops/k8s/operator/operator.go @@ -6,6 +6,7 @@ import ( "sync" ostclientset "github.com/libopenstorage/operator/pkg/client/clientset/versioned" + "github.com/portworx/sched-ops/k8s/common" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -139,7 +140,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.ost, err = ostclientset.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/policy/policy.go b/vendor/github.com/portworx/sched-ops/k8s/policy/policy.go index 46b3a5d4ca..ee188c5218 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/policy/policy.go +++ b/vendor/github.com/portworx/sched-ops/k8s/policy/policy.go @@ -5,6 +5,7 @@ import ( "os" "sync" + "github.com/portworx/sched-ops/k8s/common" policyv1beta1client "k8s.io/client-go/kubernetes/typed/policy/v1beta1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -136,7 +137,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.policy, err = policyv1beta1client.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/prometheus/prometheus.go b/vendor/github.com/portworx/sched-ops/k8s/prometheus/prometheus.go index b88a98a52a..4d8b42d557 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/prometheus/prometheus.go +++ b/vendor/github.com/portworx/sched-ops/k8s/prometheus/prometheus.go @@ -5,6 +5,7 @@ import ( "os" "sync" + "github.com/portworx/sched-ops/k8s/common" prometheusclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/rest" @@ -143,7 +144,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.prometheus, err = prometheusclient.NewForConfig(c.config) if err != nil { return err diff --git 
a/vendor/github.com/portworx/sched-ops/k8s/rbac/rbac.go b/vendor/github.com/portworx/sched-ops/k8s/rbac/rbac.go index 1b1784a3f8..aac939c6fb 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/rbac/rbac.go +++ b/vendor/github.com/portworx/sched-ops/k8s/rbac/rbac.go @@ -5,6 +5,7 @@ import ( "os" "sync" + "github.com/portworx/sched-ops/k8s/common" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1" "k8s.io/client-go/rest" @@ -141,7 +142,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.rbac, err = rbacv1client.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/storage/storage.go b/vendor/github.com/portworx/sched-ops/k8s/storage/storage.go index f62584b4cc..740853443e 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/storage/storage.go +++ b/vendor/github.com/portworx/sched-ops/k8s/storage/storage.go @@ -5,6 +5,7 @@ import ( "os" "sync" + "github.com/portworx/sched-ops/k8s/common" storagev1client "k8s.io/client-go/kubernetes/typed/storage/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -136,6 +137,10 @@ func (c *Client) loadClient() error { } var err error + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.storage, err = storagev1client.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/stork/stork.go b/vendor/github.com/portworx/sched-ops/k8s/stork/stork.go index e2432e4800..ec58e8465b 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/stork/stork.go +++ b/vendor/github.com/portworx/sched-ops/k8s/stork/stork.go @@ -10,6 +10,7 @@ import ( snapclient "github.com/kubernetes-incubator/external-storage/snapshot/pkg/client" storkv1 "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" storkclientset "github.com/libopenstorage/stork/pkg/client/clientset/versioned" + "github.com/portworx/sched-ops/k8s/common" "github.com/portworx/sched-ops/task" "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -175,7 +176,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.kube, err = kubernetes.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/modules.txt b/vendor/modules.txt index bc6a071260..fbd052f09a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -731,7 +731,7 @@ github.com/portworx/px-object-controller/client/listers/objectservice/v1alpha1 github.com/portworx/px-object-controller/pkg/client github.com/portworx/px-object-controller/pkg/controller github.com/portworx/px-object-controller/pkg/utils -# github.com/portworx/sched-ops v1.20.4-rc1.0.20220401024625-dbc61a336f65 => github.com/portworx/sched-ops v1.20.4-rc1.0.20220824221759-f21d3c3b4496 +# github.com/portworx/sched-ops v1.20.4-rc1.0.20220401024625-dbc61a336f65 => github.com/portworx/sched-ops v1.20.4-rc1.0.20220922150029-c1d35df2436a ## explicit github.com/portworx/sched-ops/k8s/admissionregistration github.com/portworx/sched-ops/k8s/apiextensions @@ -1906,7 +1906,7 @@ sigs.k8s.io/yaml # github.com/kubernetes-incubator/external-storage => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc7 # github.com/libopenstorage/autopilot-api => github.com/libopenstorage/autopilot-api v0.6.1-0.20210301232050-ca2633c6e114 # github.com/libopenstorage/openstorage => github.com/libopenstorage/openstorage 
v1.0.1-0.20220707215604-afbea03c04c5 -# github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20220824221759-f21d3c3b4496 +# github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20220922150029-c1d35df2436a # github.com/portworx/torpedo => github.com/portworx/torpedo v0.0.0-20220714042817-25f6ab6dc5d1 # gopkg.in/fsnotify.v1 v1.4.7 => github.com/fsnotify/fsnotify v1.4.7 # helm.sh/helm/v3 => helm.sh/helm/v3 v3.6.1 From 1eb63a261de4f23dca476504e2d5187253094748 Mon Sep 17 00:00:00 2001 From: sivakumar subramani Date: Thu, 6 Oct 2022 09:21:18 +0530 Subject: [PATCH 37/97] pb-3124: updated the storageclass and storage provisioner annotation, if storageclassmapping is present during restore. --- .../persistentvolumeclaim.go | 21 +++++++++++++++++++ pkg/resourcecollector/resourcecollector.go | 7 +++++++ pkg/utils/utils.go | 4 ++++ 3 files changed, 32 insertions(+) diff --git a/pkg/resourcecollector/persistentvolumeclaim.go b/pkg/resourcecollector/persistentvolumeclaim.go index 3b92f75c6e..08d884ab27 100644 --- a/pkg/resourcecollector/persistentvolumeclaim.go +++ b/pkg/resourcecollector/persistentvolumeclaim.go @@ -5,6 +5,7 @@ import ( "github.com/libopenstorage/stork/drivers/volume" stork_api "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" + "github.com/libopenstorage/stork/pkg/utils" "github.com/portworx/sched-ops/k8s/core" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" @@ -96,6 +97,26 @@ func (r *ResourceCollector) preparePVCResourceForApply( } } + if len(storageClassMappings) > 0 { + // In the case of storageClassMappings, we need to reset the + // storage class annotation and the provisioner annotation + var newSc string + var exists bool + if val, ok := pvc.Annotations[utils.StorageClassAnnotationKey]; ok { + if newSc, exists = storageClassMappings[val]; exists && len(newSc) > 0 { + pvc.Annotations[utils.StorageClassAnnotationKey] = newSc + } + } + if _, ok := pvc.Annotations[utils.StorageProvisionerAnnotationKey]; ok { + if len(newSc) > 0 { + storageClass, err := r.storageOps.GetStorageClass(newSc) + if err != nil { + return false, fmt.Errorf("failed in getting the storage class [%v]: %v", newSc, err) + } + pvc.Annotations[utils.StorageProvisionerAnnotationKey] = storageClass.Provisioner + } + } + } if len(storageClassMappings) > 0 && pvc.Spec.StorageClassName != nil { if newSc, exists := storageClassMappings[*pvc.Spec.StorageClassName]; exists && len(newSc) > 0 { diff --git a/pkg/resourcecollector/resourcecollector.go b/pkg/resourcecollector/resourcecollector.go index 5c09880a12..6be938aa02 100644 --- a/pkg/resourcecollector/resourcecollector.go +++ b/pkg/resourcecollector/resourcecollector.go @@ -13,6 +13,7 @@ import ( stork_api "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" "github.com/portworx/sched-ops/k8s/core" "github.com/portworx/sched-ops/k8s/rbac" + "github.com/portworx/sched-ops/k8s/storage" storkops "github.com/portworx/sched-ops/k8s/stork" "github.com/sirupsen/logrus" rbacv1 "k8s.io/api/rbac/v1" @@ -68,6 +69,7 @@ type ResourceCollector struct { coreOps core.Ops rbacOps rbac.Ops storkOps storkops.Ops + storageOps storage.Ops } // Options are the options passed to the ResourceCollector APIs that dictate how k8s @@ -135,6 +137,11 @@ func (r *ResourceCollector) Init(config *restclient.Config) error { if err != nil { return err } + r.storageOps, err = storage.NewForConfig(config) + if err != nil { + return err + } + return nil } diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 
8a57f0930a..009f60b988 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -13,6 +13,10 @@ const ( // PXIncrementalCountAnnotation is the annotation used to set cloud backup incremental count // for volume PXIncrementalCountAnnotation = "portworx.io/cloudsnap-incremental-count" + // StorageClassAnnotationKey - Annotation key for storageClass + StorageClassAnnotationKey = "volume.beta.kubernetes.io/storage-class" + // StorageProvisionerAnnotationKey - Annotation key for storage provisioner + StorageProvisionerAnnotationKey = "volume.beta.kubernetes.io/storage-provisioner" ) // ParseKeyValueList parses a list of key=values string into a map From a183b90fcc40a3afccd8aadb9823e827646981b7 Mon Sep 17 00:00:00 2001 From: sivakumar subramani Date: Thu, 6 Oct 2022 00:09:34 +0530 Subject: [PATCH 38/97] pb-3138: Resetting the serviceAccountUID annotation in the secret. - When applying the service account, it will get a new UID. - This UID needs to be updated in the secret. Resetting the old UID to an empty string, so that k8s will fill in the new UID of the service account. --- pkg/resourcecollector/resourcecollector.go | 2 ++ pkg/resourcecollector/secret.go | 31 ++++++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/pkg/resourcecollector/resourcecollector.go b/pkg/resourcecollector/resourcecollector.go index 6be938aa02..9b259ab3c9 100644 --- a/pkg/resourcecollector/resourcecollector.go +++ b/pkg/resourcecollector/resourcecollector.go @@ -828,6 +828,8 @@ func (r *ResourceCollector) PrepareResourceForApply( return false, r.prepareValidatingWebHookForApply(object, namespaceMappings) case "MutatingWebhookConfiguration": return false, r.prepareMutatingWebHookForApply(object, namespaceMappings) + case "Secret": + return false, r.prepareSecretForApply(object) } return false, nil } diff --git a/pkg/resourcecollector/secret.go b/pkg/resourcecollector/secret.go index a052b5240f..ea277cfdda 100644 --- a/pkg/resourcecollector/secret.go +++ b/pkg/resourcecollector/secret.go @@ -8,6 +8,10 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +const ( + serviceAccountUIDKey = "kubernetes.io/service-account.uid" +) + func (r *ResourceCollector) secretToBeCollected( object runtime.Unstructured, ) (bool, error) { @@ -26,3 +30,30 @@ return true, nil } + +func (r *ResourceCollector) prepareSecretForApply( + object runtime.Unstructured, +) error { + var secret v1.Secret + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &secret); err != nil { + logrus.Errorf("Error converting Secret object %v: %v", object, err) + return err + } + // Reset the " kubernetes.io/service-account.uid" annotation, + // so that it will update the uid of the newly created SA after restoring + err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &secret) + if err != nil { + return err + } + if secret.Annotations != nil { + if _, ok := secret.Annotations[serviceAccountUIDKey]; ok { + secret.Annotations[serviceAccountUIDKey] = "" + } + } + o, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&secret) + if err != nil { + return err + } + object.SetUnstructuredContent(o) + return err +} From ff6299dcee6a8f4415fc553f2b1501f49895f597 Mon Sep 17 00:00:00 2001 From: sivakumar subramani Date: Fri, 7 Oct 2022 20:19:21 +0530 Subject: [PATCH 39/97] pb-3122: Added logic to include a ClusterRole if it is bound to a RoleBinding at the namespace level.
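[Editorial note - illustrative sketch, not part of the original commit] Kubernetes RBAC lets a namespaced RoleBinding reference a cluster-scoped ClusterRole, which grants the role's permissions inside that one namespace only; a namespace-scoped backup therefore also has to collect ClusterRoles referenced this way. A minimal sketch of the object shape being handled, using the rbac/v1 API (the names "demo", "read-pods", "app" and "pod-reader" are made up for illustration):

	package example

	import (
		rbacv1 "k8s.io/api/rbac/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	)

	// A RoleBinding in namespace "demo" that references a ClusterRole: the
	// grant stays limited to "demo", but the ClusterRole object itself is
	// cluster-scoped and must be collected for a restore to work.
	var rb = rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "read-pods", Namespace: "demo"},
		Subjects: []rbacv1.Subject{
			{Kind: "ServiceAccount", Name: "app", Namespace: "demo"},
		},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "ClusterRole", // cluster-scoped role, namespace-scoped grant
			Name:     "pod-reader",
		},
	}

With this change, clusterRoleToBeCollected also returns true for "pod-reader" when the "demo" namespace is backed up.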
--- pkg/resourcecollector/clusterrole.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/pkg/resourcecollector/clusterrole.go b/pkg/resourcecollector/clusterrole.go index 7f4c32df66..436c5b3a22 100644 --- a/pkg/resourcecollector/clusterrole.go +++ b/pkg/resourcecollector/clusterrole.go @@ -1,6 +1,7 @@ package resourcecollector import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "strings" rbacv1 "k8s.io/api/rbac/v1" @@ -99,6 +100,21 @@ func (r *ResourceCollector) clusterRoleToBeCollected( } } } + + // A ClusterRole can also be bound to a RoleBinding at the namespace level + // Get the RoleBindings for the given namespace + var filterOptions = metav1.ListOptions{} + rbs, err := r.rbacOps.ListRoleBinding(namespace, filterOptions) + if err != nil { + return false, err + } + // Find the corresponding RoleBinding and see if it belongs to the requested namespace + for _, rb := range rbs.Items { + if rb.RoleRef.Name == name && rb.Namespace == namespace { + return true, nil + } + } + return false, nil } From b3440c4414cb7a79ca71a3d1b8a11e4fa15b9fa5 Mon Sep 17 00:00:00 2001 From: sivakumar subramani Date: Fri, 7 Oct 2022 04:13:48 +0530 Subject: [PATCH 40/97] pb-3123: Added logic to trim the group name if CRDs have the last three parts of their groups in common. - For example, some of the CRDs have groups such as agent.k8s.elastic.co, apm.k8s.elastic.co and beat.k8s.elastic.co. - In these cases, we group them while taking a backup, as they have the last three parts of the group name in common. --- .../controllers/applicationbackup.go | 9 ++--- pkg/utils/utils.go | 36 +++++++++++++++++++ 2 files changed, 41 insertions(+), 4 deletions(-) diff --git a/pkg/applicationmanager/controllers/applicationbackup.go b/pkg/applicationmanager/controllers/applicationbackup.go index 506c5bba45..7e5c9710d1 100644 --- a/pkg/applicationmanager/controllers/applicationbackup.go +++ b/pkg/applicationmanager/controllers/applicationbackup.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "github.com/libopenstorage/stork/pkg/utils" "math" "os" "path/filepath" @@ -1063,14 +1064,14 @@ func (a *ApplicationBackupController) uploadCRDResources(backup *stork_api.Appli if _, ok := resKinds[v.Kind]; !ok { continue } - crdsGroups[v.Group] = true + crdsGroups[utils.GetTrimmedGroupName(v.Group)] = true } } // pick up all the CRDs that belongs to the group in the crdsGroups map for _, crd := range crdList.Items { for _, v := range crd.Resources { - if _, ok := crdsGroups[v.Group]; !ok { + if _, ok := crdsGroups[utils.GetTrimmedGroupName(v.Group)]; !ok { continue } crdName := ruleset.Pluralize(strings.ToLower(v.Kind)) + "." + v.Group @@ -1103,13 +1104,13 @@ func (a *ApplicationBackupController) uploadCRDResources(backup *stork_api.Appli if _, ok := resKinds[v.Kind]; !ok { continue } - crdsGroups[v.Group] = true + crdsGroups[utils.GetTrimmedGroupName(v.Group)] = true } } // pick up all the CRDs that belongs to the group in the crdsGroups map for _, crd := range crdList.Items { for _, v := range crd.Resources { - if _, ok := crdsGroups[v.Group]; !ok { + if _, ok := crdsGroups[utils.GetTrimmedGroupName(v.Group)]; !ok { continue } crdName := ruleset.Pluralize(strings.ToLower(v.Kind)) + "."
+ v.Group diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 009f60b988..437c15fd69 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -2,6 +2,9 @@ package utils import ( "fmt" + "github.com/libopenstorage/stork/drivers" + "github.com/portworx/sched-ops/k8s/core" + "github.com/sirupsen/logrus" "strings" ) @@ -17,6 +20,8 @@ const ( StorageClassAnnotationKey = "volume.beta.kubernetes.io/storage-class" // StorageProvisionerAnnotationKey - Annotation key for storage provisioner StorageProvisionerAnnotationKey = "volume.beta.kubernetes.io/storage-provisioner" + // trimCRDGroupNameKey - group names containing a string from this configmap field will be trimmed + trimCRDGroupNameKey = "TRIM_CRD_GROUP_NAME" ) // ParseKeyValueList parses a list of key=values string into a map @@ -34,3 +39,34 @@ func ParseKeyValueList(expressions []string) (map[string]string, error) { return matchLabels, nil } + +// GetTrimmedGroupName - get the trimmed group name +// Usually the CRDs that belong to a common operator have the same group name. +// For example: +// keycloakbackups.keycloak.org, keycloakclients.keycloak.org, keycloakrealms.keycloak.org +// keycloaks.keycloak.org, keycloakusers.keycloak.org +// Here the group name is "keycloak.org" +// In some cases, the CRD names are as follows: +// agents.agent.k8s.elastic.co - group name: agent.k8s.elastic.co +// apmservers.apm.k8s.elastic.co - group name: apm.k8s.elastic.co +// beats.beat.k8s.elastic.co - group name: beat.k8s.elastic.co +// Here the group names are different even though they belong to the same operator. +// But they have the last three parts in common, like "k8s.elastic.co" +// So logic was added to combine the CRDs if the group has more than three parts and the last three parts are in common.
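+// Illustrative example (editorial sketch, not part of the original patch):
+// with TRIM_CRD_GROUP_NAME set to a list containing "k8s.elastic.co" in the
+// kdmp configmap, GetTrimmedGroupName maps "agent.k8s.elastic.co",
+// "apm.k8s.elastic.co" and "beat.k8s.elastic.co" all to "k8s.elastic.co",
+// so their CRDs are grouped together during backup; a group that matches no
+// configured entry is returned unchanged.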
+func GetTrimmedGroupName(group string) string { + kdmpData, err := core.Instance().GetConfigMap(drivers.KdmpConfigmapName, drivers.KdmpConfigmapNamespace) + if err != nil { + logrus.Warnf("error in reading configMap [%v/%v]", + drivers.KdmpConfigmapName, drivers.KdmpConfigmapNamespace) + return group + } + if len(kdmpData.Data[trimCRDGroupNameKey]) != 0 { + groupNameList := strings.Split(kdmpData.Data[trimCRDGroupNameKey], ",") + for _, groupName := range groupNameList { + if strings.Contains(group, groupName) { + return groupName + } + } + } + return group +} From 5c84d8101993c3f97f6627d1a5413d9c5dbddf3a Mon Sep 17 00:00:00 2001 From: sivakumar subraani Date: Tue, 11 Oct 2022 16:58:46 +0000 Subject: [PATCH 41/97] pb-3124: Added check for both beta and v1 version of StorageProvisioner annotation --- .../persistentvolumeclaim.go | 29 ++++++++++++------- pkg/resourcecollector/secret.go | 4 --- pkg/utils/utils.go | 4 --- 3 files changed, 19 insertions(+), 18 deletions(-) diff --git a/pkg/resourcecollector/persistentvolumeclaim.go b/pkg/resourcecollector/persistentvolumeclaim.go index 08d884ab27..1b1307e5d0 100644 --- a/pkg/resourcecollector/persistentvolumeclaim.go +++ b/pkg/resourcecollector/persistentvolumeclaim.go @@ -5,11 +5,11 @@ import ( "github.com/libopenstorage/stork/drivers/volume" stork_api "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" - "github.com/libopenstorage/stork/pkg/utils" "github.com/portworx/sched-ops/k8s/core" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" + k8shelper "k8s.io/component-helpers/storage/volume" pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util" ) @@ -102,18 +102,27 @@ func (r *ResourceCollector) preparePVCResourceForApply( // storage class annotation and the provisioner annotation var newSc string var exists bool - if val, ok := pvc.Annotations[utils.StorageClassAnnotationKey]; ok { + var provisioner string + if val, ok := pvc.Annotations[v1.BetaStorageClassAnnotation]; ok { if newSc, exists = storageClassMappings[val]; exists && len(newSc) > 0 { - pvc.Annotations[utils.StorageClassAnnotationKey] = newSc + pvc.Annotations[v1.BetaStorageClassAnnotation] = newSc } } - if _, ok := pvc.Annotations[utils.StorageProvisionerAnnotationKey]; ok { - if len(newSc) > 0 { - storageClass, err := r.storageOps.GetStorageClass(newSc) - if err != nil { - return false, fmt.Errorf("failed in getting the storage class [%v]: %v", newSc, err) - } - pvc.Annotations[utils.StorageProvisionerAnnotationKey] = storageClass.Provisioner + if len(newSc) > 0 { + storageClass, err := r.storageOps.GetStorageClass(newSc) + if err != nil { + return false, fmt.Errorf("failed in getting the storage class [%v]: %v", newSc, err) + } + provisioner = storageClass.Provisioner + } + if _, ok := pvc.Annotations[k8shelper.AnnBetaStorageProvisioner]; ok { + if len(provisioner) > 0 { + pvc.Annotations[k8shelper.AnnBetaStorageProvisioner] = provisioner + } + } + if _, ok := pvc.Annotations[k8shelper.AnnStorageProvisioner]; ok { + if len(provisioner) > 0 { + pvc.Annotations[k8shelper.AnnStorageProvisioner] = provisioner } } } diff --git a/pkg/resourcecollector/secret.go b/pkg/resourcecollector/secret.go index ea277cfdda..14ccc759b3 100644 --- a/pkg/resourcecollector/secret.go +++ b/pkg/resourcecollector/secret.go @@ -41,10 +41,6 @@ func (r *ResourceCollector) prepareSecretForApply( } // Reset the " kubernetes.io/service-account.uid" annotation, // so that it will update the uid of the newly created SA after restoring - err := 
runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &secret) - if err != nil { - return err - } if secret.Annotations != nil { if _, ok := secret.Annotations[serviceAccountUIDKey]; ok { secret.Annotations[serviceAccountUIDKey] = "" diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 437c15fd69..e27f475ce1 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -16,10 +16,6 @@ const ( // PXIncrementalCountAnnotation is the annotation used to set cloud backup incremental count // for volume PXIncrementalCountAnnotation = "portworx.io/cloudsnap-incremental-count" - // StorageClassAnnotationKey - Annotation key for storageClass - StorageClassAnnotationKey = "volume.beta.kubernetes.io/storage-class" - // StorageProvisionerAnnotationKey - Annotation key for storage provisioner - StorageProvisionerAnnotationKey = "volume.beta.kubernetes.io/storage-provisioner" // trimCRDGroupNameKey - groups name containing the string from this configmap field will be trimmed trimCRDGroupNameKey = "TRIM_CRD_GROUP_NAME" ) From 6c001bd6ccf3f6f34b67241c037d6f912fc2ad8b Mon Sep 17 00:00:00 2001 From: sivakumar subraani Date: Wed, 12 Oct 2022 00:53:02 +0000 Subject: [PATCH 42/97] pb-3124: vendor changes for k8s component-helpers --- go.mod | 4 +- go.sum | 4 + .../storage/volume/helpers.go | 28 +- .../storage/volume/pv_helpers.go | 342 ++++++++++++++++++ vendor/k8s.io/utils/pointer/pointer.go | 49 ++- vendor/modules.txt | 6 +- 6 files changed, 425 insertions(+), 8 deletions(-) create mode 100644 vendor/k8s.io/component-helpers/storage/volume/pv_helpers.go diff --git a/go.mod b/go.mod index f31faa0219..fa23974dd8 100644 --- a/go.mod +++ b/go.mod @@ -53,7 +53,7 @@ require ( k8s.io/cli-runtime v0.21.5 k8s.io/client-go v12.0.0+incompatible k8s.io/code-generator v0.21.5 - k8s.io/component-helpers v0.21.5 + k8s.io/component-helpers v0.24.0 k8s.io/kube-scheduler v0.0.0 k8s.io/kubectl v0.21.5 k8s.io/kubernetes v1.21.5 @@ -93,7 +93,7 @@ replace ( k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.21.5 k8s.io/code-generator => k8s.io/code-generator v0.21.5 k8s.io/component-base => k8s.io/component-base v0.21.5 - k8s.io/component-helpers => k8s.io/component-helpers v0.21.5 + k8s.io/component-helpers => k8s.io/component-helpers v0.24.0 k8s.io/controller-manager => k8s.io/controller-manager v0.21.5 k8s.io/cri-api => k8s.io/cri-api v0.21.5 k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.21.5 diff --git a/go.sum b/go.sum index 98f42c56de..0815a64f41 100644 --- a/go.sum +++ b/go.sum @@ -2416,6 +2416,8 @@ k8s.io/component-helpers v0.21.4 h1:Q6L3sQ+L5uaaUcsJkhlzU5UchcIYBZ56Y2Bq5k4qOtk= k8s.io/component-helpers v0.21.4/go.mod h1:/5TBNWmxaAymZweO1JWv3Pt5rcYJV1LbWWY0x1rDdVU= k8s.io/component-helpers v0.21.5 h1:NzRIDAmDk0tJw2OSvDIlkXQ/j96MUKW0PF/htVH6S1g= k8s.io/component-helpers v0.21.5/go.mod h1:sjHa2QESu4iHcL20eSKyIvCYEKdxQyS3LthUe10tt0k= +k8s.io/component-helpers v0.24.0 h1:hZIHGfdd55thhqd9oxjDTw68OAPauDMJ+8hC69aNw1I= +k8s.io/component-helpers v0.24.0/go.mod h1:Q2SlLm4h6g6lPTC9GMMfzdywfLSvJT2f1hOnnjaWD8c= k8s.io/controller-manager v0.21.4/go.mod h1:a/iL7W19zkyirHDaupk9cyC11nejVznGwZI6I8tbyQY= k8s.io/controller-manager v0.21.5/go.mod h1:65guJmOoIEklTPwwYOmiHOsbEoAnQ7xKSZ9v+RdJC6g= k8s.io/cri-api v0.21.4/go.mod h1:ukzeKnOkrG9/+ghKZA57WeZbQfRtqlGLF5GcF3RtHZ8= @@ -2484,6 +2486,8 @@ k8s.io/utils v0.0.0-20210527160623-6fdb442a123b h1:MSqsVQ3pZvPGTqCjptfimO2WjG7A9 k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils 
v0.0.0-20210722164352-7f3ee0f31471 h1:DnzUXII7sVg1FJ/4JX6YDRJfLNAC7idRatPwe07suiI= k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= +k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= knative.dev/pkg v0.0.0-20191101194912-56c2594e4f11/go.mod h1:pgODObA1dTyhNoFxPZTTjNWfx6F0aKsKzn+vaT9XO/Q= layeh.com/radius v0.0.0-20190322222518-890bc1058917/go.mod h1:fywZKyu//X7iRzaxLgPWsvc0L26IUpVvE/aeIL2JtIQ= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= diff --git a/vendor/k8s.io/component-helpers/storage/volume/helpers.go b/vendor/k8s.io/component-helpers/storage/volume/helpers.go index a76e933448..7ec376f34a 100644 --- a/vendor/k8s.io/component-helpers/storage/volume/helpers.go +++ b/vendor/k8s.io/component-helpers/storage/volume/helpers.go @@ -16,7 +16,13 @@ limitations under the License. package volume -import v1 "k8s.io/api/core/v1" +import ( + "fmt" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/component-helpers/scheduling/corev1" +) // GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was // requested, it returns "". @@ -42,3 +48,23 @@ func GetPersistentVolumeClass(volume *v1.PersistentVolume) string { return volume.Spec.StorageClassName } + +// CheckNodeAffinity looks at the PV node affinity, and checks if the node has the same corresponding labels +// This ensures that we don't mount a volume that doesn't belong to this node +func CheckNodeAffinity(pv *v1.PersistentVolume, nodeLabels map[string]string) error { + if pv.Spec.NodeAffinity == nil { + return nil + } + + if pv.Spec.NodeAffinity.Required != nil { + node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: nodeLabels}} + terms := pv.Spec.NodeAffinity.Required + if matches, err := corev1.MatchNodeSelectorTerms(node, terms); err != nil { + return err + } else if !matches { + return fmt.Errorf("no matching NodeSelectorTerms") + } + } + + return nil +} diff --git a/vendor/k8s.io/component-helpers/storage/volume/pv_helpers.go b/vendor/k8s.io/component-helpers/storage/volume/pv_helpers.go new file mode 100644 index 0000000000..f927b72314 --- /dev/null +++ b/vendor/k8s.io/component-helpers/storage/volume/pv_helpers.go @@ -0,0 +1,342 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package volume + +import ( + "fmt" + + v1 "k8s.io/api/core/v1" + storage "k8s.io/api/storage/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/kubernetes/scheme" + storagelisters "k8s.io/client-go/listers/storage/v1" + "k8s.io/client-go/tools/reference" +) + +const ( + // AnnBindCompleted Annotation applies to PVCs. It indicates that the lifecycle + // of the PVC has passed through the initial setup. 
This information changes how + // we interpret some observations of the state of the objects. Value of this + // Annotation does not matter. + AnnBindCompleted = "pv.kubernetes.io/bind-completed" + + // AnnBoundByController annotation applies to PVs and PVCs. It indicates that + // the binding (PV->PVC or PVC->PV) was installed by the controller. The + // absence of this annotation means the binding was done by the user (i.e. + // pre-bound). Value of this annotation does not matter. + // External PV binders must bind PV the same way as PV controller, otherwise PV + // controller may not handle it correctly. + AnnBoundByController = "pv.kubernetes.io/bound-by-controller" + + // AnnSelectedNode annotation is added to a PVC that has been triggered by scheduler to + // be dynamically provisioned. Its value is the name of the selected node. + AnnSelectedNode = "volume.kubernetes.io/selected-node" + + // NotSupportedProvisioner is a special provisioner name which can be set + // in storage class to indicate dynamic provisioning is not supported by + // the storage. + NotSupportedProvisioner = "kubernetes.io/no-provisioner" + + // AnnDynamicallyProvisioned annotation is added to a PV that has been dynamically provisioned by + // Kubernetes. Its value is name of volume plugin that created the volume. + // It serves both user (to show where a PV comes from) and Kubernetes (to + // recognize dynamically provisioned PVs in its decisions). + AnnDynamicallyProvisioned = "pv.kubernetes.io/provisioned-by" + + // AnnMigratedTo annotation is added to a PVC and PV that is supposed to be + // dynamically provisioned/deleted by by its corresponding CSI driver + // through the CSIMigration feature flags. When this annotation is set the + // Kubernetes components will "stand-down" and the external-provisioner will + // act on the objects + AnnMigratedTo = "pv.kubernetes.io/migrated-to" + + // AnnStorageProvisioner annotation is added to a PVC that is supposed to be dynamically + // provisioned. Its value is name of volume plugin that is supposed to provision + // a volume for this PVC. + // TODO: remove beta anno once deprecation period ends + AnnStorageProvisioner = "volume.kubernetes.io/storage-provisioner" + AnnBetaStorageProvisioner = "volume.beta.kubernetes.io/storage-provisioner" + + //PVDeletionProtectionFinalizer is the finalizer added by the external-provisioner on the PV + PVDeletionProtectionFinalizer = "external-provisioner.volume.kubernetes.io/finalizer" + + // PVDeletionInTreeProtectionFinalizer is the finalizer added to protect PV deletion for in-tree volumes. + PVDeletionInTreeProtectionFinalizer = "kubernetes.io/pv-controller" +) + +// IsDelayBindingProvisioning checks if claim provisioning with selected-node annotation +func IsDelayBindingProvisioning(claim *v1.PersistentVolumeClaim) bool { + // When feature VolumeScheduling enabled, + // Scheduler signal to the PV controller to start dynamic + // provisioning by setting the "AnnSelectedNode" annotation + // in the PVC + _, ok := claim.Annotations[AnnSelectedNode] + return ok +} + +// IsDelayBindingMode checks if claim is in delay binding mode. 
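+// (Editorial note: "delay binding" means the claim's StorageClass sets
+// VolumeBindingMode to WaitForFirstConsumer, which is what the StorageClass
+// lookup below verifies.)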
+func IsDelayBindingMode(claim *v1.PersistentVolumeClaim, classLister storagelisters.StorageClassLister) (bool, error) { + className := GetPersistentVolumeClaimClass(claim) + if className == "" { + return false, nil + } + + class, err := classLister.Get(className) + if err != nil { + if apierrors.IsNotFound(err) { + return false, nil + } + return false, err + } + + if class.VolumeBindingMode == nil { + return false, fmt.Errorf("VolumeBindingMode not set for StorageClass %q", className) + } + + return *class.VolumeBindingMode == storage.VolumeBindingWaitForFirstConsumer, nil +} + +// GetBindVolumeToClaim returns a new volume which is bound to given claim. In +// addition, it returns a bool which indicates whether we made modification on +// original volume. +func GetBindVolumeToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) (*v1.PersistentVolume, bool, error) { + dirty := false + + // Check if the volume was already bound (either by user or by controller) + shouldSetBoundByController := false + if !IsVolumeBoundToClaim(volume, claim) { + shouldSetBoundByController = true + } + + // The volume from method args can be pointing to watcher cache. We must not + // modify these, therefore create a copy. + volumeClone := volume.DeepCopy() + + // Bind the volume to the claim if it is not bound yet + if volume.Spec.ClaimRef == nil || + volume.Spec.ClaimRef.Name != claim.Name || + volume.Spec.ClaimRef.Namespace != claim.Namespace || + volume.Spec.ClaimRef.UID != claim.UID { + + claimRef, err := reference.GetReference(scheme.Scheme, claim) + if err != nil { + return nil, false, fmt.Errorf("unexpected error getting claim reference: %w", err) + } + volumeClone.Spec.ClaimRef = claimRef + dirty = true + } + + // Set AnnBoundByController if it is not set yet + if shouldSetBoundByController && !metav1.HasAnnotation(volumeClone.ObjectMeta, AnnBoundByController) { + metav1.SetMetaDataAnnotation(&volumeClone.ObjectMeta, AnnBoundByController, "yes") + dirty = true + } + + return volumeClone, dirty, nil +} + +// IsVolumeBoundToClaim returns true, if given volume is pre-bound or bound +// to specific claim. Both claim.Name and claim.Namespace must be equal. +// If claim.UID is present in volume.Spec.ClaimRef, it must be equal too. +func IsVolumeBoundToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) bool { + if volume.Spec.ClaimRef == nil { + return false + } + if claim.Name != volume.Spec.ClaimRef.Name || claim.Namespace != volume.Spec.ClaimRef.Namespace { + return false + } + if volume.Spec.ClaimRef.UID != "" && claim.UID != volume.Spec.ClaimRef.UID { + return false + } + return true +} + +// FindMatchingVolume goes through the list of volumes to find the best matching volume +// for the claim. +// +// This function is used by both the PV controller and scheduler. +// +// delayBinding is true only in the PV controller path. When set, prebound PVs are still returned +// as a match for the claim, but unbound PVs are skipped. +// +// node is set only in the scheduler path. When set, the PV node affinity is checked against +// the node's labels. +// +// excludedVolumes is only used in the scheduler path, and is needed for evaluating multiple +// unbound PVCs for a single Pod at one time. As each PVC finds a matching PV, the chosen +// PV needs to be excluded from future matching. 
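+// Illustrative call shapes (editorial sketch, not from the vendored source):
+//   PV controller path: FindMatchingVolume(claim, volumes, nil /*node*/, nil, delayBinding)
+//   scheduler path:     FindMatchingVolume(claim, volumes, node, excludedVolumes, delayBinding)
+// A nil, nil result means no existing PV currently satisfies the claim.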
+func FindMatchingVolume( + claim *v1.PersistentVolumeClaim, + volumes []*v1.PersistentVolume, + node *v1.Node, + excludedVolumes map[string]*v1.PersistentVolume, + delayBinding bool) (*v1.PersistentVolume, error) { + + var smallestVolume *v1.PersistentVolume + var smallestVolumeQty resource.Quantity + requestedQty := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] + requestedClass := GetPersistentVolumeClaimClass(claim) + + var selector labels.Selector + if claim.Spec.Selector != nil { + internalSelector, err := metav1.LabelSelectorAsSelector(claim.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("error creating internal label selector for claim: %v: %v", claimToClaimKey(claim), err) + } + selector = internalSelector + } + + // Go through all available volumes with two goals: + // - find a volume that is either pre-bound by user or dynamically + // provisioned for this claim. Because of this we need to loop through + // all volumes. + // - find the smallest matching one if there is no volume pre-bound to + // the claim. + for _, volume := range volumes { + if _, ok := excludedVolumes[volume.Name]; ok { + // Skip volumes in the excluded list + continue + } + if volume.Spec.ClaimRef != nil && !IsVolumeBoundToClaim(volume, claim) { + continue + } + + volumeQty := volume.Spec.Capacity[v1.ResourceStorage] + if volumeQty.Cmp(requestedQty) < 0 { + continue + } + // filter out mismatching volumeModes + if CheckVolumeModeMismatches(&claim.Spec, &volume.Spec) { + continue + } + + // check if PV's DeletionTimeStamp is set, if so, skip this volume. + if volume.ObjectMeta.DeletionTimestamp != nil { + continue + } + + nodeAffinityValid := true + if node != nil { + // Scheduler path, check that the PV NodeAffinity + // is satisfied by the node + // CheckNodeAffinity is the most expensive call in this loop. + // We should check cheaper conditions first or consider optimizing this function. + err := CheckNodeAffinity(volume, node.Labels) + if err != nil { + nodeAffinityValid = false + } + } + + if IsVolumeBoundToClaim(volume, claim) { + // If PV node affinity is invalid, return no match. + // This means the prebound PV (and therefore PVC) + // is not suitable for this node. + if !nodeAffinityValid { + return nil, nil + } + + return volume, nil + } + + if node == nil && delayBinding { + // PV controller does not bind this claim. + // Scheduler will handle binding unbound volumes + // Scheduler path will have node != nil + continue + } + + // filter out: + // - volumes in non-available phase + // - volumes whose labels don't match the claim's selector, if specified + // - volumes in Class that is not requested + // - volumes whose NodeAffinity does not match the node + if volume.Status.Phase != v1.VolumeAvailable { + // We ignore volumes in non-available phase, because volumes that + // satisfies matching criteria will be updated to available, binding + // them now has high chance of encountering unnecessary failures + // due to API conflicts. 
+ continue + } else if selector != nil && !selector.Matches(labels.Set(volume.Labels)) { + continue + } + if GetPersistentVolumeClass(volume) != requestedClass { + continue + } + if !nodeAffinityValid { + continue + } + + if node != nil { + // Scheduler path + // Check that the access modes match + if !CheckAccessModes(claim, volume) { + continue + } + } + + if smallestVolume == nil || smallestVolumeQty.Cmp(volumeQty) > 0 { + smallestVolume = volume + smallestVolumeQty = volumeQty + } + } + + if smallestVolume != nil { + // Found a matching volume + return smallestVolume, nil + } + + return nil, nil +} + +// CheckVolumeModeMismatches is a convenience method that checks volumeMode for PersistentVolume +// and PersistentVolumeClaims +func CheckVolumeModeMismatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1.PersistentVolumeSpec) bool { + // In HA upgrades, we cannot guarantee that the apiserver is on a version >= controller-manager. + // So we default a nil volumeMode to filesystem + requestedVolumeMode := v1.PersistentVolumeFilesystem + if pvcSpec.VolumeMode != nil { + requestedVolumeMode = *pvcSpec.VolumeMode + } + pvVolumeMode := v1.PersistentVolumeFilesystem + if pvSpec.VolumeMode != nil { + pvVolumeMode = *pvSpec.VolumeMode + } + return requestedVolumeMode != pvVolumeMode +} + +// CheckAccessModes returns true if PV satisfies all the PVC's requested AccessModes +func CheckAccessModes(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) bool { + pvModesMap := map[v1.PersistentVolumeAccessMode]bool{} + for _, mode := range volume.Spec.AccessModes { + pvModesMap[mode] = true + } + + for _, mode := range claim.Spec.AccessModes { + _, ok := pvModesMap[mode] + if !ok { + return false + } + } + return true +} + +func claimToClaimKey(claim *v1.PersistentVolumeClaim) string { + return fmt.Sprintf("%s/%s", claim.Namespace, claim.Name) +} diff --git a/vendor/k8s.io/utils/pointer/pointer.go b/vendor/k8s.io/utils/pointer/pointer.go index 1da6f6664a..f5802d2e81 100644 --- a/vendor/k8s.io/utils/pointer/pointer.go +++ b/vendor/k8s.io/utils/pointer/pointer.go @@ -19,6 +19,7 @@ package pointer import ( "fmt" "reflect" + "time" ) // AllPtrFieldsNil tests whether all pointer fields in a struct are nil. This is useful when, @@ -46,6 +47,24 @@ func AllPtrFieldsNil(obj interface{}) bool { return true } +// Int returns a pointer to an int +func Int(i int) *int { + return &i +} + +var IntPtr = Int // for back-compat + +// IntDeref dereferences the int ptr and returns it if not nil, or else +// returns def. +func IntDeref(ptr *int, def int) int { + if ptr != nil { + return *ptr + } + return def +} + +var IntPtrDerefOr = IntDeref // for back-compat + // Int32 returns a pointer to an int32. func Int32(i int32) *int32 { return &i @@ -166,7 +185,7 @@ func StringEqual(a, b *string) bool { return *a == *b } -// Float32 returns a pointer to the a float32. +// Float32 returns a pointer to a float32. func Float32(i float32) *float32 { return &i } @@ -196,7 +215,7 @@ func Float32Equal(a, b *float32) bool { return *a == *b } -// Float64 returns a pointer to the a float64. +// Float64 returns a pointer to a float64. func Float64(i float64) *float64 { return &i } @@ -225,3 +244,29 @@ func Float64Equal(a, b *float64) bool { } return *a == *b } + +// Duration returns a pointer to a time.Duration. +func Duration(d time.Duration) *time.Duration { + return &d +} + +// DurationDeref dereferences the time.Duration ptr and returns it if not nil, or else +// returns def. 
+func DurationDeref(ptr *time.Duration, def time.Duration) time.Duration { + if ptr != nil { + return *ptr + } + return def +} + +// DurationEqual returns true if both arguments are nil or both arguments +// dereference to the same value. +func DurationEqual(a, b *time.Duration) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} diff --git a/vendor/modules.txt b/vendor/modules.txt index fbd052f09a..784aaed933 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1670,7 +1670,7 @@ k8s.io/component-base/featuregate k8s.io/component-base/metrics k8s.io/component-base/metrics/legacyregistry k8s.io/component-base/version -# k8s.io/component-helpers v0.21.5 => k8s.io/component-helpers v0.21.5 +# k8s.io/component-helpers v0.24.0 => k8s.io/component-helpers v0.24.0 ## explicit k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity @@ -1754,7 +1754,7 @@ k8s.io/kubernetes/pkg/volume/util/types k8s.io/kubernetes/pkg/volume/util/volumepathhandler # k8s.io/mount-utils v0.0.0 => k8s.io/mount-utils v0.21.5 k8s.io/mount-utils -# k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471 +# k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 k8s.io/utils/buffer k8s.io/utils/exec k8s.io/utils/integer @@ -1920,7 +1920,7 @@ sigs.k8s.io/yaml # k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.21.5 # k8s.io/code-generator => k8s.io/code-generator v0.21.5 # k8s.io/component-base => k8s.io/component-base v0.21.5 -# k8s.io/component-helpers => k8s.io/component-helpers v0.21.5 +# k8s.io/component-helpers => k8s.io/component-helpers v0.24.0 # k8s.io/controller-manager => k8s.io/controller-manager v0.21.5 # k8s.io/cri-api => k8s.io/cri-api v0.21.5 # k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.21.5 From 43643d4e7435fc5800fd00ad2e9ac9e290a03a91 Mon Sep 17 00:00:00 2001 From: Ram Date: Mon, 19 Sep 2022 23:47:34 +0530 Subject: [PATCH 43/97] Snapshotter: implement find snapshot api for portworx driver - remove unnecessary call for WaitSnapshot which list all volumesnapshotdata Signed-off-by: Ram --- drivers/volume/portworx/portworx.go | 26 ++++++++++++++++++-- pkg/migration/controllers/migration.go | 8 +++++- pkg/snapshot/controllers/snapshot.go | 9 ++----- pkg/snapshot/controllers/snapshotschedule.go | 5 ++++ 4 files changed, 38 insertions(+), 10 deletions(-) diff --git a/drivers/volume/portworx/portworx.go b/drivers/volume/portworx/portworx.go index b0c6494124..e7eb7ea567 100644 --- a/drivers/volume/portworx/portworx.go +++ b/drivers/volume/portworx/portworx.go @@ -1966,9 +1966,31 @@ func (p *portworx) DescribeSnapshot(snapshotData *crdv1.VolumeSnapshotData) (*[] return &snapConditions, true, err } -// TODO: Implement FindSnapshot +// FindSnapshot return snapshotdata source for created portworx snapshot +// Note: we wait for underlying driver snapshot creation in SnapshotCreate() only, +// This will be called by snapshotter every time new snapshot is created func (p *portworx) FindSnapshot(tags *map[string]string) (*crdv1.VolumeSnapshotDataSource, *[]crdv1.VolumeSnapshotCondition, error) { - return nil, nil, &errors.ErrNotImplemented{} + if tags == nil || len(*tags) == 0 { + return nil, nil, fmt.Errorf("empty tags received for snapshots") + } + name := (*tags)[snapshotter.CloudSnapshotCreatedForVolumeSnapshotNameTag] + namespace := (*tags)[snapshotter.CloudSnapshotCreatedForVolumeSnapshotNamespaceTag] + logrus.Infof("Find snapshotdata for snapshot: %s/%s", namespace, name) + if name == "" || namespace 
== "" { + return nil, nil, fmt.Errorf("empty snapshot metadata %s/%s", namespace, name) + } + snap, err := k8sextops.Instance().GetSnapshot(name, namespace) + if err != nil { + return nil, nil, fmt.Errorf("unable to retrieve snapshot %s/%s, err: %v", namespace, name, err) + } + snapData, err := k8sextops.Instance().GetSnapshotData(snap.Spec.SnapshotDataName) + if err != nil { + // this means that the snapshotdata object from the external snapshotter has not been created yet + // return an error so that the reconciler calls FindSnapshot() again + return nil, nil, fmt.Errorf("unable to retrieve snapshotdata object %s/%s, err: %v", namespace, name, err) + } + logrus.Debugf("Found snapshotdata for snapshot: %s/%s", namespace, name) + return &snapData.Spec.VolumeSnapshotDataSource, &snap.Status.Conditions, nil } func (p *portworx) GetSnapshotType(snap *crdv1.VolumeSnapshot) (string, error) { diff --git a/pkg/migration/controllers/migration.go b/pkg/migration/controllers/migration.go index f2f2f309ab..7ceef07a90 100644 --- a/pkg/migration/controllers/migration.go +++ b/pkg/migration/controllers/migration.go @@ -1182,6 +1182,12 @@ func (m *MigrationController) checkAndUpdateService( object runtime.Unstructured, objHash uint64, ) (bool, error) { + // if a transformation spec is provided, always update the service with + // the transform spec rules + if len(migration.Spec.TransformSpecs) != 0 { + return false, nil + } + var svc v1.Service if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &svc); err != nil { return false, fmt.Errorf("error converting unstructured obj to service resource: %v", err) @@ -2008,7 +2014,7 @@ func (m *MigrationController) applyResources( case "Service": var skipUpdate bool skipUpdate, err = m.checkAndUpdateService(migration, o, objHash) - if err == nil && skipUpdate && len(migration.Spec.TransformSpecs) == 0 { + if err == nil && skipUpdate { break } fallthrough diff --git a/pkg/snapshot/controllers/snapshot.go b/pkg/snapshot/controllers/snapshot.go index 3e232a0bdc..8351d3291b 100644 --- a/pkg/snapshot/controllers/snapshot.go +++ b/pkg/snapshot/controllers/snapshot.go @@ -69,22 +69,17 @@ func (s *Snapshotter) Start(stopChannel <-chan struct{}) error { return err } if ok { - err = client.CreateCRDV1(aeclientset) + err = client.CreateCRDV1(aeclientset, validateCRDInterval, validateCRDTimeout) if err != nil { return err } } else { - err = client.CreateCRD(aeclientset) + err = client.CreateCRD(aeclientset, validateCRDInterval, validateCRDTimeout) if err != nil { return err } } - err = client.WaitForSnapshotResource(snapshotClient) - if err != nil { - return err - } - plugins := make(map[string]snapshotvolume.Plugin) plugins[s.Driver.String()] = s.Driver.GetSnapshotPlugin() diff --git a/pkg/snapshot/controllers/snapshotschedule.go b/pkg/snapshot/controllers/snapshotschedule.go index d1bf175c8d..6c5e22db06 100644 --- a/pkg/snapshot/controllers/snapshotschedule.go +++ b/pkg/snapshot/controllers/snapshotschedule.go @@ -42,6 +42,7 @@ const ( storkRuleAnnotationPrefix = "stork.libopenstorage.org" preSnapRuleAnnotationKey = storkRuleAnnotationPrefix + "/pre-snapshot-rule" postSnapRuleAnnotationKey = storkRuleAnnotationPrefix + "/post-snapshot-rule" + StorkSnapshotNameLabel = "stork.libopenstorage.org/snapshotName" ) // NewSnapshotScheduleController creates a new instance of SnapshotScheduleController.
@@ -317,6 +318,10 @@ func (s *SnapshotScheduleController) startVolumeSnapshot(snapshotSchedule *stork } snapshot.Metadata.Annotations[SnapshotScheduleNameAnnotation] = snapshotSchedule.Name snapshot.Metadata.Annotations[SnapshotSchedulePolicyTypeAnnotation] = string(policyType) + if snapshot.Metadata.Labels == nil { + snapshot.Metadata.Labels = make(map[string]string) + } + snapshot.Metadata.Labels[StorkSnapshotNameLabel] = snapshotName if snapshotSchedule.Spec.PreExecRule != "" { _, err := storkops.Instance().GetRule(snapshotSchedule.Spec.PreExecRule, snapshotSchedule.Namespace) if err != nil { From c2244f9dc5fdf659e4649420df4064ae1dad681d Mon Sep 17 00:00:00 2001 From: Ram Date: Wed, 12 Oct 2022 17:34:39 +0530 Subject: [PATCH 44/97] vendor updates - external snapshotter Signed-off-by: Ram --- go.mod | 4 +- go.sum | 2 + .../snapshot/pkg/client/client.go | 65 +++++++++++++++---- .../pkg/controller/snapshotter/snapshotter.go | 14 ++-- vendor/modules.txt | 5 +- 5 files changed, 69 insertions(+), 21 deletions(-) diff --git a/go.mod b/go.mod index fa23974dd8..20b333f8b1 100644 --- a/go.mod +++ b/go.mod @@ -73,8 +73,8 @@ replace ( //github.com/heptio/ark => github.com/heptio/ark v1.0.0 github.com/heptio/velero => github.com/heptio/velero v1.0.0 github.com/kubernetes-csi/external-snapshotter/client/v4 => github.com/kubernetes-csi/external-snapshotter/client/v4 v4.0.0 - github.com/kubernetes-incubator/external-storage => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc7 - github.com/kubernetes-incubator/external-storage v0.20.4-openstorage-rc7 => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc7 + github.com/kubernetes-incubator/external-storage => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc10 + github.com/kubernetes-incubator/external-storage v0.20.4-openstorage-rc10 => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc10 github.com/libopenstorage/autopilot-api => github.com/libopenstorage/autopilot-api v0.6.1-0.20210301232050-ca2633c6e114 github.com/libopenstorage/openstorage => github.com/libopenstorage/openstorage v1.0.1-0.20220707215604-afbea03c04c5 //github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20220824221759-f21d3c3b4496 diff --git a/go.sum b/go.sum index 0815a64f41..acf4bdfd9b 100644 --- a/go.sum +++ b/go.sum @@ -1107,6 +1107,8 @@ github.com/libopenstorage/autopilot-api v0.6.1-0.20210301232050-ca2633c6e114/go. 
github.com/libopenstorage/cloudops v0.0.0-20190815012442-6e0d676b6c3e/go.mod h1:quSDXGC3Fhc+pBwMRIi1Gk+kaSfBDZo5rRsftapTzGE= github.com/libopenstorage/cloudops v0.0.0-20200604165016-9cc0977d745e/go.mod h1:5Qie78eVLLXqLkLCq1+0HyJzjpdRCHyeg9LWlU0WPfU= github.com/libopenstorage/cloudops v0.0.0-20220420143942-8bdd341e5b41/go.mod h1:zigCEUGrJZbK/1FN6+SHMuMjS6vjeSKxuo0G4Ars4Cg= +github.com/libopenstorage/external-storage v0.20.4-openstorage-rc10 h1:q21CLGSi9DhNBBuJuitquA/T6FwLV3KNZxaJpxQbOLc= +github.com/libopenstorage/external-storage v0.20.4-openstorage-rc10/go.mod h1:nffpoeodwwp+wwngmBGbLBCd7TZ9GxHLtxKoaLRW6K4= github.com/libopenstorage/external-storage v0.20.4-openstorage-rc7 h1:mHp7bfGyHwG4P8dhHEMJ775KLmcjv3tcA2Uc+5nGpXg= github.com/libopenstorage/external-storage v0.20.4-openstorage-rc7/go.mod h1:nffpoeodwwp+wwngmBGbLBCd7TZ9GxHLtxKoaLRW6K4= github.com/libopenstorage/gossip v0.0.0-20190507031959-c26073a01952/go.mod h1:TjXt2Iz2bTkpfc4Q6xN0ttiNipTVwEEYoZSMZHlfPek= diff --git a/vendor/github.com/kubernetes-incubator/external-storage/snapshot/pkg/client/client.go b/vendor/github.com/kubernetes-incubator/external-storage/snapshot/pkg/client/client.go index bf945fc60c..b934abea57 100644 --- a/vendor/github.com/kubernetes-incubator/external-storage/snapshot/pkg/client/client.go +++ b/vendor/github.com/kubernetes-incubator/external-storage/snapshot/pkg/client/client.go @@ -18,6 +18,7 @@ package client import ( "context" + "fmt" "reflect" "time" @@ -68,7 +69,7 @@ func NewClient(cfg *rest.Config) (*rest.RESTClient, *runtime.Scheme, error) { } // CreateCRD creates CustomResourceDefinition -func CreateCRD(clientset apiextensionsclient.Interface) error { +func CreateCRD(clientset apiextensionsclient.Interface, retryInterval, timeout time.Duration) error { crd := &apiextensionsv1beta1.CustomResourceDefinition{ ObjectMeta: metav1.ObjectMeta{ Name: crdv1.VolumeSnapshotDataResourcePlural + "." 
+ crdv1.GroupName, @@ -78,8 +79,8 @@ func CreateCRD(clientset apiextensionsclient.Interface) error { Version: crdv1.SchemeGroupVersion.Version, Scope: apiextensionsv1beta1.ClusterScoped, Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ - Plural: crdv1.VolumeSnapshotDataResourcePlural, - Kind: reflect.TypeOf(crdv1.VolumeSnapshotData{}).Name(), + Plural: crdv1.VolumeSnapshotDataResourcePlural, + Kind: reflect.TypeOf(crdv1.VolumeSnapshotData{}).Name(), ShortNames: storkVolumeSnapshotDataShortNames, }, }, @@ -100,8 +101,8 @@ func CreateCRD(clientset apiextensionsclient.Interface) error { Version: crdv1.SchemeGroupVersion.Version, Scope: apiextensionsv1beta1.NamespaceScoped, Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ - Plural: crdv1.VolumeSnapshotResourcePlural, - Kind: reflect.TypeOf(crdv1.VolumeSnapshot{}).Name(), + Plural: crdv1.VolumeSnapshotResourcePlural, + Kind: reflect.TypeOf(crdv1.VolumeSnapshot{}).Name(), ShortNames: storkVolumeSnapshotShortNames, }, }, @@ -111,11 +112,31 @@ func CreateCRD(clientset apiextensionsclient.Interface) error { glog.Fatalf("failed to create VolumeSnapshotResource: %#v, err: %#v", res, err) } - return nil + return wait.PollImmediate(retryInterval, timeout, func() (bool, error) { + crd, err := clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), crd.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + return false, nil + } else if err != nil { + return false, err + } + for _, cond := range crd.Status.Conditions { + switch cond.Type { + case apiextensionsv1beta1.Established: + if cond.Status == apiextensionsv1beta1.ConditionTrue { + return true, nil + } + case apiextensionsv1beta1.NamesAccepted: + if cond.Status == apiextensionsv1beta1.ConditionFalse { + return false, fmt.Errorf("name conflict: %v", cond.Reason) + } + } + } + return false, nil + }) } // CreateCRDV1 creates CustomResourceDefinition for v1 apiVersion -func CreateCRDV1(clientset apiextensionsclient.Interface) error { +func CreateCRDV1(clientset apiextensionsclient.Interface, retryInterval, timeout time.Duration) error { setSchema := true annot := make(map[string]string) annot["api-approved.kubernetes.io"] = "https://github.com/kubernetes-csi/external-snapshotter/pull/419" @@ -139,8 +160,8 @@ func CreateCRDV1(clientset apiextensionsclient.Interface) error { }, Scope: apiextensionsv1.ClusterScoped, Names: apiextensionsv1.CustomResourceDefinitionNames{ - Plural: crdv1.VolumeSnapshotDataResourcePlural, - Kind: reflect.TypeOf(crdv1.VolumeSnapshotData{}).Name(), + Plural: crdv1.VolumeSnapshotDataResourcePlural, + Kind: reflect.TypeOf(crdv1.VolumeSnapshotData{}).Name(), ShortNames: storkVolumeSnapshotDataShortNames, }, }, @@ -172,8 +193,8 @@ func CreateCRDV1(clientset apiextensionsclient.Interface) error { }, Scope: apiextensionsv1.NamespaceScoped, Names: apiextensionsv1.CustomResourceDefinitionNames{ - Plural: crdv1.VolumeSnapshotResourcePlural, - Kind: reflect.TypeOf(crdv1.VolumeSnapshot{}).Name(), + Plural: crdv1.VolumeSnapshotResourcePlural, + Kind: reflect.TypeOf(crdv1.VolumeSnapshot{}).Name(), ShortNames: storkVolumeSnapshotShortNames, }, }, @@ -183,7 +204,27 @@ func CreateCRDV1(clientset apiextensionsclient.Interface) error { glog.Fatalf("failed to create VolumeSnapshotResource: %#v, err: %#v", res, err) } - return nil + return wait.PollImmediate(retryInterval, timeout, func() (bool, error) { + crd, err := clientset.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crd.Name, metav1.GetOptions{}) + if 
apierrors.IsNotFound(err) { + return false, nil + } else if err != nil { + return false, err + } + for _, cond := range crd.Status.Conditions { + switch cond.Type { + case apiextensionsv1.Established: + if cond.Status == apiextensionsv1.ConditionTrue { + return true, nil + } + case apiextensionsv1.NamesAccepted: + if cond.Status == apiextensionsv1.ConditionFalse { + return false, fmt.Errorf("name conflict: %v", cond.Reason) + } + } + } + return false, nil + }) } // WaitForSnapshotResource waits for the snapshot resource diff --git a/vendor/github.com/kubernetes-incubator/external-storage/snapshot/pkg/controller/snapshotter/snapshotter.go b/vendor/github.com/kubernetes-incubator/external-storage/snapshot/pkg/controller/snapshotter/snapshotter.go index eced203898..75da61ca04 100644 --- a/vendor/github.com/kubernetes-incubator/external-storage/snapshot/pkg/controller/snapshotter/snapshotter.go +++ b/vendor/github.com/kubernetes-incubator/external-storage/snapshot/pkg/controller/snapshotter/snapshotter.go @@ -42,6 +42,7 @@ const ( snapshotMetadataPVName = "SnapshotMetadata-PVName" snapshotDataNamePrefix = "k8s-volume-snapshot" pvNameLabel = "pvName" + StorkSnapshotNameLabel = "stork.libopenstorage.org/snapshotName" defaultExponentialBackOffOnError = true // volumeSnapshot* is configuration of exponential backoff for @@ -376,6 +377,14 @@ func (vs *volumeSnapshotter) updateSnapshotIfExists(uniqueSnapshotName string, s glog.Infof("No tag can be found in snapshot metadata %s", uniqueSnapshotName) return statusNew, snapshot, nil } + + // Find snapshot through cloud provider by existing tags, and create VolumeSnapshotData if such snapshot is found + snapshotDataSource, conditions, err = vs.findSnapshotByTags(snapshotName, snapshot) + if err != nil { + glog.Infof("unable to find snapshot by looking at tags %s, err: %v", uniqueSnapshotName, err) + return statusNew, snapshot, nil + } + // Check whether snapshotData object is already created or not. If yes, snapshot is already // triggered through cloud provider, bind it and return pending state if snapshotDataObj = vs.getSnapshotDataFromSnapshotName(uniqueSnapshotName); snapshotDataObj != nil { @@ -386,11 +395,6 @@ func (vs *volumeSnapshotter) updateSnapshotIfExists(uniqueSnapshotName string, s } return statusPending, snapshotObj, nil } - // Find snapshot through cloud provider by existing tags, and create VolumeSnapshotData if such snapshot is found - snapshotDataSource, conditions, err = vs.findSnapshotByTags(snapshotName, snapshot) - if err != nil { - return statusNew, snapshot, nil - } // Snapshot is found. 
Create VolumeSnapshotData, bind VolumeSnapshotData to VolumeSnapshot, and update VolumeSnapshot status glog.Infof("updateSnapshotIfExists: create VolumeSnapshotData object for VolumeSnapshot %s.", uniqueSnapshotName) pvName, ok := snapshot.Metadata.Labels[pvNameLabel] diff --git a/vendor/modules.txt b/vendor/modules.txt index 784aaed933..0db8683a7a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -481,7 +481,7 @@ github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/scheme github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1 github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1 -# github.com/kubernetes-incubator/external-storage v0.20.4-openstorage-rc7 => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc7 +# github.com/kubernetes-incubator/external-storage v0.20.4-openstorage-rc7 => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc10 ## explicit github.com/kubernetes-incubator/external-storage/snapshot/pkg/apis/crd/v1 github.com/kubernetes-incubator/external-storage/snapshot/pkg/client @@ -1903,7 +1903,8 @@ sigs.k8s.io/yaml # github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.4.1 # github.com/heptio/velero => github.com/heptio/velero v1.0.0 # github.com/kubernetes-csi/external-snapshotter/client/v4 => github.com/kubernetes-csi/external-snapshotter/client/v4 v4.0.0 -# github.com/kubernetes-incubator/external-storage => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc7 +# github.com/kubernetes-incubator/external-storage => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc10 +# github.com/kubernetes-incubator/external-storage v0.20.4-openstorage-rc10 => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc10 # github.com/libopenstorage/autopilot-api => github.com/libopenstorage/autopilot-api v0.6.1-0.20210301232050-ca2633c6e114 # github.com/libopenstorage/openstorage => github.com/libopenstorage/openstorage v1.0.1-0.20220707215604-afbea03c04c5 # github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20220922150029-c1d35df2436a From 5dd8796111526a7571649415da0d2d29b187a5b1 Mon Sep 17 00:00:00 2001 From: sivakumar subramani Date: Fri, 14 Oct 2022 14:13:02 +0530 Subject: [PATCH 45/97] pb-3157: Added fixes to set the proper storage class and provisioner name for PV and PVC during restore. - Added logic to store the storage class in the CurrentStorageClassName annotation of the PV spec, so that it can be used during restore. - Before applying the PV spec, update the correct storage class and provisioner name based on the storageclass map.
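For illustration, a minimal Go sketch of the remapping logic this patch implements (the annotation key matches the CurrentStorageClassName constant added in pkg/resourcecollector; remapStorageClass and the sample class names are hypothetical, not part of Stork):

package main

import "fmt"

// currentSCAnnotation mirrors the CurrentStorageClassName annotation this patch records at backup time.
const currentSCAnnotation = "stork.libopenstorage.org/current-storage-class-name"

// remapStorageClass (hypothetical helper) returns the storage class a restored PV
// should use: the mapped class if the backed-up class appears in storageClassMappings,
// otherwise the original class recorded in the annotation.
func remapStorageClass(annotations, storageClassMappings map[string]string) string {
	oldSc := annotations[currentSCAnnotation]
	if newSc, ok := storageClassMappings[oldSc]; ok && len(newSc) > 0 {
		return newSc
	}
	return oldSc
}

func main() {
	ann := map[string]string{currentSCAnnotation: "px-sc"}
	fmt.Println(remapStorageClass(ann, map[string]string{"px-sc": "px-csi-sc"})) // px-csi-sc
	fmt.Println(remapStorageClass(ann, nil))                                     // falls back to px-sc
}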
--- pkg/resourcecollector/persistentvolume.go | 64 +++++++++++++++++++ .../persistentvolumeclaim.go | 26 +++++--- pkg/resourcecollector/resourcecollector.go | 5 +- pkg/utils/utils.go | 16 +++++ 4 files changed, 102 insertions(+), 9 deletions(-) diff --git a/pkg/resourcecollector/persistentvolume.go b/pkg/resourcecollector/persistentvolume.go index cf11984fcb..67f75e1346 100644 --- a/pkg/resourcecollector/persistentvolume.go +++ b/pkg/resourcecollector/persistentvolume.go @@ -2,6 +2,8 @@ package resourcecollector import ( "fmt" + "github.com/libopenstorage/stork/pkg/utils" + "github.com/sirupsen/logrus" "github.com/libopenstorage/stork/drivers/volume" stork_api "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" @@ -10,6 +12,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + k8shelper "k8s.io/component-helpers/storage/volume" ) func (r *ResourceCollector) pvToBeCollected( @@ -90,10 +93,45 @@ func (r *ResourceCollector) pvToBeCollected( func (r *ResourceCollector) preparePVResourceForCollection( object runtime.Unstructured, ) error { + var pv v1.PersistentVolume + var currentSc string + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &pv); err != nil { + return err + } + // Sometimes the pv spec does not contain the storage class. + // In that case, we will get it from the pvc spec. + if len(pv.Spec.StorageClassName) == 0 { + pvc, err := r.coreOps.GetPersistentVolumeClaim(pv.Spec.ClaimRef.Name, pv.Spec.ClaimRef.Namespace) + if err != nil { + return err + } + currentSc, err = utils.GetStorageClassNameForPVC(pvc) + if err != nil { + // Not returning the error, as there are cases where the PVC might not have a storage class. + // A case where a PV can be manually bind-mounted to a volume (backend storage volume). + logrus.Debugf("preparePVResourceForCollection: failed to fetch storage class from PVC %v: %v", pv.Spec.ClaimRef.Name, err) + } + } else { + currentSc = pv.Spec.StorageClassName + } err := unstructured.SetNestedField(object.UnstructuredContent(), nil, "spec", "claimRef") if err != nil { return err } + if len(currentSc) > 0 { + annotations, found, err := unstructured.NestedStringMap(object.UnstructuredContent(), "metadata", "annotations") + if err != nil { + return err + } + if !found { + annotations = make(map[string]string) + } + annotations[CurrentStorageClassName] = currentSc + if err := unstructured.SetNestedStringMap(object.UnstructuredContent(), annotations, "metadata", "annotations"); err != nil { + return err + } + object.SetUnstructuredContent(object.UnstructuredContent()) + } return unstructured.SetNestedField(object.UnstructuredContent(), "", "spec", "storageClassName") } @@ -103,6 +141,7 @@ func (r *ResourceCollector) preparePVResourceForApply( object runtime.Unstructured, pvNameMappings map[string]string, vInfo []*stork_api.ApplicationRestoreVolumeInfo, + storageClassMappings map[string]string, ) (bool, error) { var updatedName string var present bool @@ -119,6 +158,30 @@ func (r *ResourceCollector) preparePVResourceForApply( if updatedName, present = pvNameMappings[pv.Name]; !present { return true, nil } + // get the storage class name from the CurrentStorageClassName annotation + var oldSc string + var exists bool + var newSc string + if pv.Annotations != nil { + if val, ok := pv.Annotations[CurrentStorageClassName]; ok { + oldSc = val + // delete the CurrentStorageClassName annotation before applying + delete(pv.Annotations, 
CurrentStorageClassName) + if newSc, exists = storageClassMappings[oldSc]; exists && len(newSc) > 0 { + // If the oldSc is present in the storageclass map, get the new sc and update it in the pv spec + // Get the provisioner name from the new sc and update it + storageClass, err := r.storageOps.GetStorageClass(newSc) + if err != nil { + return false, fmt.Errorf("failed in getting the storage class [%v]: %v", newSc, err) + } + pv.Annotations[k8shelper.AnnDynamicallyProvisioned] = storageClass.Provisioner + pv.Spec.StorageClassName = newSc + } else { + // if the storageclass map does not have the oldSc name, update the PV spec with the oldSc itself before applying. + pv.Spec.StorageClassName = oldSc + } + } + } pv.Name = updatedName var driverName string @@ -130,6 +193,7 @@ func (r *ResourceCollector) preparePVResourceForApply( break } } + + // in case of non-restore call make sure resourcecollector // checks proper driver by looking at pv name if driverName == "" { diff --git a/pkg/resourcecollector/persistentvolumeclaim.go b/pkg/resourcecollector/persistentvolumeclaim.go index 1b1307e5d0..e1ac23cc8b 100644 --- a/pkg/resourcecollector/persistentvolumeclaim.go +++ b/pkg/resourcecollector/persistentvolumeclaim.go @@ -5,6 +5,7 @@ import ( "github.com/libopenstorage/stork/drivers/volume" stork_api "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" + "github.com/libopenstorage/stork/pkg/utils" "github.com/portworx/sched-ops/k8s/core" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" @@ -101,11 +102,25 @@ func (r *ResourceCollector) preparePVCResourceForApply( // In the case of storageClassMappings, we need to reset the // storage class annotation and the provisioner annotation var newSc string + var currentSc string var exists bool var provisioner string - if val, ok := pvc.Annotations[v1.BetaStorageClassAnnotation]; ok { - if newSc, exists = storageClassMappings[val]; exists && len(newSc) > 0 { - pvc.Annotations[v1.BetaStorageClassAnnotation] = newSc + // Get the existing storage class from the pvc spec. + // It can be in the BetaStorageClassAnnotation annotation or in the spec. + currentSc, err := utils.GetStorageClassNameForPVC(&pvc) + if err != nil { + // If the storageclassMapping is present, then we can assume that the storage class should be present in the PVC spec. + // So handle the error and return it to the caller. 
+ return false, err + } + if len(currentSc) != 0 { + if newSc, exists = storageClassMappings[currentSc]; exists && len(newSc) > 0 { + if _, ok := pvc.Annotations[v1.BetaStorageClassAnnotation]; ok { + pvc.Annotations[v1.BetaStorageClassAnnotation] = newSc + } + if pvc.Spec.StorageClassName != nil && len(*pvc.Spec.StorageClassName) > 0 { + *pvc.Spec.StorageClassName = newSc + } } } if len(newSc) > 0 { @@ -127,11 +142,6 @@ func (r *ResourceCollector) preparePVCResourceForApply( } } - if len(storageClassMappings) > 0 && pvc.Spec.StorageClassName != nil { - if newSc, exists := storageClassMappings[*pvc.Spec.StorageClassName]; exists && len(newSc) > 0 { - pvc.Spec.StorageClassName = &newSc - } - } o, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&pvc) if err != nil { return false, err diff --git a/pkg/resourcecollector/resourcecollector.go b/pkg/resourcecollector/resourcecollector.go index 9b259ab3c9..3776d687db 100644 --- a/pkg/resourcecollector/resourcecollector.go +++ b/pkg/resourcecollector/resourcecollector.go @@ -50,6 +50,9 @@ const ( // TransformedResourceName is the annotation used to check if resource has been updated // as per transformation rules TransformedResourceName = "stork.libopenstorage.org/resourcetransformation-name" + // CurrentStorageClassName is the annotation used to store the current storage class of the PV before + // taking backup as we will reset it to empty. + CurrentStorageClassName = "stork.libopenstorage.org/current-storage-class-name" // ServiceKind for k8s service resources ServiceKind = "Service" @@ -817,7 +820,7 @@ func (r *ResourceCollector) PrepareResourceForApply( } return true, nil case "PersistentVolume": - return r.preparePVResourceForApply(object, pvNameMappings, vInfo) + return r.preparePVResourceForApply(object, pvNameMappings, vInfo, storageClassMappings) case "PersistentVolumeClaim": return r.preparePVCResourceForApply(object, allObjects, pvNameMappings, storageClassMappings, vInfo) case "ClusterRoleBinding": diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index e27f475ce1..33b575490c 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -5,6 +5,7 @@ import ( "github.com/libopenstorage/stork/drivers" "github.com/portworx/sched-ops/k8s/core" "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" "strings" ) @@ -66,3 +67,18 @@ func GetTrimmedGroupName(group string) string { } return group } + +// GetStorageClassNameForPVC - Get the storageClass name from the PVC spec +func GetStorageClassNameForPVC(pvc *v1.PersistentVolumeClaim) (string, error) { + var scName string + if pvc.Spec.StorageClassName != nil && len(*pvc.Spec.StorageClassName) > 0 { + scName = *pvc.Spec.StorageClassName + } else { + scName = pvc.Annotations[v1.BetaStorageClassAnnotation] + } + + if len(scName) == 0 { + return "", fmt.Errorf("PVC: %s does not have a storage class", pvc.Name) + } + return scName, nil +} From 9230c88dfd6ec505ff317f38c2f085a4fe661f57 Mon Sep 17 00:00:00 2001 From: Ram Date: Tue, 18 Oct 2022 18:59:14 +0530 Subject: [PATCH 46/97] PWX-27094: Dont suppress transformation failure msg Signed-off-by: Ram --- pkg/migration/controllers/migration.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/migration/controllers/migration.go b/pkg/migration/controllers/migration.go index 7ceef07a90..aa144e4a34 100644 --- a/pkg/migration/controllers/migration.go +++ b/pkg/migration/controllers/migration.go @@ -407,12 +407,12 @@ func (m *MigrationController) handle(ctx context.Context, migration *stork_api.M return nil } if err := 
storkops.Instance().ValidateResourceTransformation(resp.Name, ns, 1*time.Minute, 5*time.Second); err != nil { - errMsg := fmt.Sprintf("transformation %s is not in ready state: %s", migration.Spec.TransformSpecs, resp.Status.Status) + errMsg := fmt.Sprintf("transformation %s is not in ready state: %s", migration.Spec.TransformSpecs[0], resp.Status.Status) log.MigrationLog(migration).Errorf(errMsg) m.recorder.Event(migration, v1.EventTypeWarning, string(stork_api.MigrationStatusFailed), - err.Error()) + errMsg) err = m.updateMigrationCR(context.Background(), migration) if err != nil { log.MigrationLog(migration).Errorf("Error updating CR, err: %v", err) From 0ff5f95ac82ad297c18d3b21a2396a40dc67e4e4 Mon Sep 17 00:00:00 2001 From: Aditya Dani Date: Fri, 14 Oct 2022 11:34:32 -0700 Subject: [PATCH 47/97] PB-3150: Handle GCE snapshot NotFound error in DeleteBackup implementation - Do not fail CancelBackup / DeleteBackup API if a snapshot was not found. --- drivers/volume/gcp/gcp.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/volume/gcp/gcp.go b/drivers/volume/gcp/gcp.go index d0e33f4fbd..df815e6a62 100644 --- a/drivers/volume/gcp/gcp.go +++ b/drivers/volume/gcp/gcp.go @@ -347,6 +347,12 @@ func (g *gcp) DeleteBackup(backup *storkapi.ApplicationBackup) (bool, error) { } _, err := service.Snapshots.Delete(vInfo.Options["projectID"], vInfo.BackupID).Do() if err != nil { + if gceErr, ok := err.(*googleapi.Error); ok { + if gceErr.Code == http.StatusNotFound { + // snapshot is already deleted + continue + } + } return true, err } } From 191ecb73aaf8abc682520d706267adc57fdf7d35 Mon Sep 17 00:00:00 2001 From: Aditya Dani Date: Tue, 18 Oct 2022 22:13:04 -0700 Subject: [PATCH 48/97] Update golang version to 1.17.11 --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index cd89ca2a0e..880adafb8d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,7 +2,7 @@ sudo: required dist: xenial language: go go: - - 1.17.3 + - 1.17.11 before_install: - sudo apt-get update -yq || true - sudo apt-get install go-md2man -y From 4374fedc83aa7a28fd5da24652fa476a00b54bba Mon Sep 17 00:00:00 2001 From: sivakumar subraani Date: Tue, 18 Oct 2022 14:10:06 +0000 Subject: [PATCH 49/97] pb-3162: Resetting secret token data for service account token before restoring --- pkg/resourcecollector/secret.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/resourcecollector/secret.go b/pkg/resourcecollector/secret.go index 14ccc759b3..6a0b02dd7c 100644 --- a/pkg/resourcecollector/secret.go +++ b/pkg/resourcecollector/secret.go @@ -44,6 +44,10 @@ func (r *ResourceCollector) prepareSecretForApply( if secret.Annotations != nil { if _, ok := secret.Annotations[serviceAccountUIDKey]; ok { secret.Annotations[serviceAccountUIDKey] = "" + // Reset the secret token data to empty, so that a new service account token will be populated by k8s during restore. 
+ if secret.Data["token"] != nil { + secret.Data["token"] = nil + } } } o, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&secret) From 6336f00b6e990e0135229ebb2d28af8026bf04e5 Mon Sep 17 00:00:00 2001 From: Ram Date: Thu, 20 Oct 2022 11:31:39 +0530 Subject: [PATCH 50/97] Handle len check for keypair resource transformation value Signed-off-by: Ram --- pkg/migration/controllers/resourcetransformation.go | 1 + pkg/resourcecollector/resourcetransformation.go | 9 ++++++--- pkg/schedule/schedule.go | 3 +++ 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/pkg/migration/controllers/resourcetransformation.go b/pkg/migration/controllers/resourcetransformation.go index a183f60cf8..ece907030c 100644 --- a/pkg/migration/controllers/resourcetransformation.go +++ b/pkg/migration/controllers/resourcetransformation.go @@ -270,6 +270,7 @@ func (r *ResourceTransformationController) validateTransformResource(ctx context log.TransformLog(transform).Errorf("Unable to apply patch path %s on resource kind: %s/,%s/%s, err: %v", path, kind, resInfo.Namespace, resInfo.Name, err) resInfo.Status = stork_api.ResourceTransformationStatusFailed resInfo.Reason = err.Error() + return err } unstructured, ok := object.(*unstructured.Unstructured) if !ok { diff --git a/pkg/resourcecollector/resourcetransformation.go b/pkg/resourcecollector/resourcetransformation.go index bfc1c09e34..5b093fec89 100644 --- a/pkg/resourcecollector/resourcetransformation.go +++ b/pkg/resourcecollector/resourcetransformation.go @@ -84,19 +84,22 @@ func TransformResources( var value interface{} if path.Type == stork_api.KeyPairResourceType { currMap, _, err := unstructured.NestedMap(content, strings.Split(path.Path, ".")...) - if err != nil { - return fmt.Errorf("unable to find suspend path, err: %v", err) + if err != nil || len(currMap) == 0 { + return fmt.Errorf("unable to find spec path, err: %v", err) } mapList := strings.Split(path.Value, ",") for _, val := range mapList { keyPair := strings.Split(val, ":") + if len(keyPair) != 2 { + return fmt.Errorf("invalid keypair value format: %s", keyPair) + } currMap[keyPair[0]] = keyPair[1] } value = currMap } else if path.Type == stork_api.SliceResourceType { currList, _, err := unstructured.NestedSlice(content, strings.Split(path.Path, ".")...) if err != nil { - return fmt.Errorf("unable to find spec path, err: %v", err) + return fmt.Errorf("unable to find spec path, err: %v", err) } arrList := strings.Split(path.Value, ",") for _, val := range arrList { diff --git a/pkg/schedule/schedule.go b/pkg/schedule/schedule.go index eab730e35e..6581d68c61 100644 --- a/pkg/schedule/schedule.go +++ b/pkg/schedule/schedule.go @@ -231,6 +231,9 @@ func GetOptions(policyName string, namespace string, policyType stork_api.Schedu return schedulePolicy.Policy.Interval.Options, nil case stork_api.SchedulePolicyTypeDaily: options := schedulePolicy.Policy.Daily.Options + if len(options) == 0 { + options = make(map[string]string) + } scheduledDay, ok := stork_api.Days[schedulePolicy.Policy.Daily.ForceFullSnapshotDay] if ok { currentDay := GetCurrentTime().Weekday() From 96850c0ce27fc091a553be600e7b1480257d374e Mon Sep 17 00:00:00 2001 From: sivakumar subramani Date: Thu, 20 Oct 2022 22:12:41 +0530 Subject: [PATCH 51/97] pb-3174: Added csi section in the case of portworx volume restore. - In the case of a portworx volume, we allow storageclass mapping. - This will enable the user to back up a portworx volume and restore it as a portworx csi volume. 
- Also from a non-secure portworx cluster to a secure portworx cluster. - Adding the csi section if the destination storageclass is a csi provisioner, and also adding the secure token if it is a security-enabled cluster. --- drivers/volume/aws/aws.go | 1 + drivers/volume/azure/azure.go | 1 + drivers/volume/csi/csi.go | 3 +- drivers/volume/gcp/gcp.go | 1 + drivers/volume/kdmp/kdmp.go | 1 + drivers/volume/linstor/linstor.go | 1 + drivers/volume/mock/mock.go | 1 + drivers/volume/portworx/portworx.go | 79 +++++++++++++++++-- drivers/volume/volume.go | 2 +- .../controllers/applicationclone.go | 2 +- pkg/migration/controllers/migration.go | 2 +- pkg/resourcecollector/persistentvolume.go | 3 +- pkg/resourcecollector/resourcecollector.go | 2 +- 13 files changed, 85 insertions(+), 14 deletions(-) diff --git a/drivers/volume/aws/aws.go b/drivers/volume/aws/aws.go index c4fed686bf..f762b887c9 100644 --- a/drivers/volume/aws/aws.go +++ b/drivers/volume/aws/aws.go @@ -397,6 +397,7 @@ func (a *aws) DeleteBackup(backup *storkapi.ApplicationBackup) (bool, error) { func (a *aws) UpdateMigratedPersistentVolumeSpec( pv *v1.PersistentVolume, vInfo *storkapi.ApplicationRestoreVolumeInfo, + namespaceMapping map[string]string, ) (*v1.PersistentVolume, error) { if pv.Spec.CSI != nil { pv.Spec.CSI.VolumeHandle = pv.Name diff --git a/drivers/volume/azure/azure.go b/drivers/volume/azure/azure.go index 08f02a7c3d..9ea17d2bc0 100644 --- a/drivers/volume/azure/azure.go +++ b/drivers/volume/azure/azure.go @@ -408,6 +408,7 @@ func (a *azure) DeleteBackup(backup *storkapi.ApplicationBackup) (bool, error) { func (a *azure) UpdateMigratedPersistentVolumeSpec( pv *v1.PersistentVolume, vInfo *storkapi.ApplicationRestoreVolumeInfo, + namespaceMapping map[string]string, ) (*v1.PersistentVolume, error) { disk, err := a.diskClient.Get(context.TODO(), a.resourceGroup, pv.Name) if err != nil { diff --git a/drivers/volume/csi/csi.go b/drivers/volume/csi/csi.go index 491fac2203..eb1da78882 100644 --- a/drivers/volume/csi/csi.go +++ b/drivers/volume/csi/csi.go @@ -89,7 +89,7 @@ type BackupObjectv1beta1Csi struct { V1SnapshotRequired bool } -// GetVolumeSnapshotContent retrieves a backed up volume snapshot +// GetVolumeSnapshot retrieves a backed up volume snapshot func (cbo *csiBackupObject) GetVolumeSnapshot(snapshotID string) (interface{}, error) { var vs interface{} var ok bool @@ -1170,6 +1170,7 @@ func (c *csi) DeleteBackup(backup *storkapi.ApplicationBackup) (bool, error) { func (c *csi) UpdateMigratedPersistentVolumeSpec( pv *v1.PersistentVolume, vInfo *storkapi.ApplicationRestoreVolumeInfo, + namespaceMapping map[string]string, ) (*v1.PersistentVolume, error) { return pv, nil } diff --git a/drivers/volume/gcp/gcp.go b/drivers/volume/gcp/gcp.go index df815e6a62..73db3f64d8 100644 --- a/drivers/volume/gcp/gcp.go +++ b/drivers/volume/gcp/gcp.go @@ -362,6 +362,7 @@ func (g *gcp) DeleteBackup(backup *storkapi.ApplicationBackup) (bool, error) { func (g *gcp) UpdateMigratedPersistentVolumeSpec( pv *v1.PersistentVolume, vInfo *storkapi.ApplicationRestoreVolumeInfo, + namespaceMapping map[string]string, ) (*v1.PersistentVolume, error) { if pv.Spec.CSI != nil { key, err := common.VolumeIDToKey(pv.Spec.CSI.VolumeHandle) diff --git a/drivers/volume/kdmp/kdmp.go b/drivers/volume/kdmp/kdmp.go index d44ffc9a95..cabce3d341 100644 --- a/drivers/volume/kdmp/kdmp.go +++ b/drivers/volume/kdmp/kdmp.go @@ -576,6 +576,7 @@ func doKdmpDeleteJob(id string, driver drivers.Interface) (bool, error) { func (k *kdmp) UpdateMigratedPersistentVolumeSpec( pv 
*v1.PersistentVolume, vInfo *storkapi.ApplicationRestoreVolumeInfo, + namespaceMapping map[string]string, ) (*v1.PersistentVolume, error) { return pv, nil } diff --git a/drivers/volume/linstor/linstor.go b/drivers/volume/linstor/linstor.go index 52ed06cf59..66ad3ddbb3 100644 --- a/drivers/volume/linstor/linstor.go +++ b/drivers/volume/linstor/linstor.go @@ -395,6 +395,7 @@ func (l *linstor) Stop() error { func (l *linstor) UpdateMigratedPersistentVolumeSpec( pv *v1.PersistentVolume, vInfo *storkapi.ApplicationRestoreVolumeInfo, + namespaceMapping map[string]string, ) (*v1.PersistentVolume, error) { return pv, nil diff --git a/drivers/volume/mock/mock.go b/drivers/volume/mock/mock.go index 7a75360280..f43fbb3cbf 100644 --- a/drivers/volume/mock/mock.go +++ b/drivers/volume/mock/mock.go @@ -309,6 +309,7 @@ func (m *Driver) GetSnapshotType(snap *snapv1.VolumeSnapshot) (string, error) { func (m *Driver) UpdateMigratedPersistentVolumeSpec( pv *v1.PersistentVolume, vInfo *storkapi.ApplicationRestoreVolumeInfo, + namespaceMapping map[string]string, ) (*v1.PersistentVolume, error) { return pv, nil diff --git a/drivers/volume/portworx/portworx.go b/drivers/volume/portworx/portworx.go index e7eb7ea567..1e066455ab 100644 --- a/drivers/volume/portworx/portworx.go +++ b/drivers/volume/portworx/portworx.go @@ -165,17 +165,24 @@ const ( noToken = "notoken" // templatizedNamespace is the CSI templatized parameter for namespace templatizedNamespace = "${pvc.namespace}" - proxyEndpoint = "proxy_endpoint" - proxyPath = "proxy_nfs_exportpath" - pureBackendParam = "backend" - pureFileParam = "pure_file" - pureBlockParam = "pure_block" + // templatizedName is the CSI templatized parameter for name + templatizedName = "${pvc.name}" + proxyEndpoint = "proxy_endpoint" + proxyPath = "proxy_nfs_exportpath" + pureBackendParam = "backend" + pureFileParam = "pure_file" + pureBlockParam = "pure_block" statfsSOName = "px_statfs.so" statfsSODirInStork = "/" statfsSODirInVirtLauncher = "/etc" statfsConfigMapName = "px-statfs" statfsVolName = "px-statfs" + + nodePublishSecretName = "csi.storage.k8s.io/node-publish-secret-name" + controllerExpandSecretName = "csi.storage.k8s.io/controller-expand-secret-name" + nodePublishSecretNamespace = "csi.storage.k8s.io/node-publish-secret-namespace" + controllerExpandSecretNamespace = "csi.storage.k8s.io/controller-expand-secret-namespace" ) type cloudSnapStatus struct { @@ -375,9 +382,9 @@ func (p *portworx) initPortworxClients() error { return err } -// tokenGenerator generates authorization token for system.admin -// when shared secret is not configured authz token is empty string -// this let Openstorage API clients be bootstrapped with no authorization (by accepting empty token) +// tokenGenerator generates an authorization token for system.admin +// when the shared secret is not configured, the authz token is an empty string +// this lets Openstorage API clients be bootstrapped with no authorization (by accepting an empty token) func (p *portworx) tokenGenerator() (string, error) { if len(p.jwtSharedSecret) == 0 { return "", nil @@ -2592,7 +2599,63 @@ func (p *portworx) CancelMigration(migration *storkapi.Migration) error { func (p *portworx) UpdateMigratedPersistentVolumeSpec( pv *v1.PersistentVolume, vInfo *storkapi.ApplicationRestoreVolumeInfo, + namespaceMapping map[string]string, ) (*v1.PersistentVolume, error) { + // Get the pv storageclass, look up its provisioner, and decide on the csi section. 
+ if len(pv.Spec.StorageClassName) != 0 { + sc, err := storage.Instance().GetStorageClass(pv.Spec.StorageClassName) + if err != nil { + return nil, fmt.Errorf("failed in getting the storage class [%v]: %v", pv.Spec.StorageClassName, err) + } + if isCsiProvisioner(sc.Provisioner) { + // add csi section in the pv spec + if pv.Spec.CSI == nil { + pv.Spec.CSI = &v1.CSIPersistentVolumeSource{} + } + // get the destinationNamespace + var dstNamespace string + var exists bool + if dstNamespace, exists = namespaceMapping[vInfo.SourceNamespace]; !exists { + dstNamespace = vInfo.SourceNamespace + } + // Update the controller expand secret + if val, ok := sc.Parameters[controllerExpandSecretName]; ok { + if pv.Spec.CSI.ControllerExpandSecretRef == nil { + pv.Spec.CSI.ControllerExpandSecretRef = &v1.SecretReference{} + } + if val == templatizedName { + pv.Spec.CSI.ControllerExpandSecretRef.Name = vInfo.PersistentVolumeClaim + } else { + pv.Spec.CSI.ControllerExpandSecretRef.Name = val + } + // In the case of portworx volume backup, we will have namespace mapping always. + // So no need to check for the template string as we are going to change the value to destination namespace always. + // If user does not change the namespace, the source and destination namespace in the mapping will be same. + if _, ok := sc.Parameters[controllerExpandSecretNamespace]; ok { + pv.Spec.CSI.ControllerExpandSecretRef.Namespace = dstNamespace + } + } + + // Update the node publish secret + if val, ok := sc.Parameters[nodePublishSecretName]; ok { + if pv.Spec.CSI.NodePublishSecretRef == nil { + pv.Spec.CSI.NodePublishSecretRef = &v1.SecretReference{} + } + if val == templatizedName { + pv.Spec.CSI.NodePublishSecretRef.Name = vInfo.PersistentVolumeClaim + } else { + pv.Spec.CSI.NodePublishSecretRef.Name = val + } + if _, ok := sc.Parameters[nodePublishSecretNamespace]; ok { + pv.Spec.CSI.NodePublishSecretRef.Namespace = dstNamespace + } + } + // Update driver (provisioner) name + pv.Spec.CSI.Driver = sc.Provisioner + // In the case of csi, will set pv.Spec.portworxVolume to nil as we will have csi section now. 
+ pv.Spec.PortworxVolume = nil + } + } if pv.Spec.CSI != nil { pv.Spec.CSI.VolumeHandle = pv.Name diff --git a/drivers/volume/volume.go b/drivers/volume/volume.go index 916194901e..f72c629cdb 100644 --- a/drivers/volume/volume.go +++ b/drivers/volume/volume.go @@ -192,7 +192,7 @@ type MigratePluginInterface interface { CancelMigration(*storkapi.Migration) error // Update the PVC spec to point to the migrated volume on the destination // cluster - UpdateMigratedPersistentVolumeSpec(*v1.PersistentVolume, *storkapi.ApplicationRestoreVolumeInfo) (*v1.PersistentVolume, error) + UpdateMigratedPersistentVolumeSpec(*v1.PersistentVolume, *storkapi.ApplicationRestoreVolumeInfo, map[string]string) (*v1.PersistentVolume, error) } // ClusterDomainsPluginInterface Interface to manage cluster domains diff --git a/pkg/applicationmanager/controllers/applicationclone.go b/pkg/applicationmanager/controllers/applicationclone.go index 9b223ddf3d..e5541e61cc 100644 --- a/pkg/applicationmanager/controllers/applicationclone.go +++ b/pkg/applicationmanager/controllers/applicationclone.go @@ -588,7 +588,7 @@ func (a *ApplicationCloneController) preparePVResource( return err } - _, err := a.volDriver.UpdateMigratedPersistentVolumeSpec(&pv, nil) + _, err := a.volDriver.UpdateMigratedPersistentVolumeSpec(&pv, nil, nil) if err != nil { return err } diff --git a/pkg/migration/controllers/migration.go b/pkg/migration/controllers/migration.go index aa144e4a34..0db1414170 100644 --- a/pkg/migration/controllers/migration.go +++ b/pkg/migration/controllers/migration.go @@ -1300,7 +1300,7 @@ func (m *MigrationController) preparePVResource( } pv.Annotations[PVReclaimAnnotation] = string(pv.Spec.PersistentVolumeReclaimPolicy) pv.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimRetain - _, err := m.volDriver.UpdateMigratedPersistentVolumeSpec(&pv, nil) + _, err := m.volDriver.UpdateMigratedPersistentVolumeSpec(&pv, nil, nil) if err != nil { return err } diff --git a/pkg/resourcecollector/persistentvolume.go b/pkg/resourcecollector/persistentvolume.go index 67f75e1346..4181ec1edb 100644 --- a/pkg/resourcecollector/persistentvolume.go +++ b/pkg/resourcecollector/persistentvolume.go @@ -142,6 +142,7 @@ func (r *ResourceCollector) preparePVResourceForApply( pvNameMappings map[string]string, vInfo []*stork_api.ApplicationRestoreVolumeInfo, storageClassMappings map[string]string, + namespaceMappings map[string]string, ) (bool, error) { var updatedName string var present bool @@ -208,7 +209,7 @@ func (r *ResourceCollector) preparePVResourceForApply( if err != nil { return false, err } - _, err = driver.UpdateMigratedPersistentVolumeSpec(&pv, volumeInfo) + _, err = driver.UpdateMigratedPersistentVolumeSpec(&pv, volumeInfo, namespaceMappings) if err != nil { return false, err } diff --git a/pkg/resourcecollector/resourcecollector.go b/pkg/resourcecollector/resourcecollector.go index 3776d687db..882173039a 100644 --- a/pkg/resourcecollector/resourcecollector.go +++ b/pkg/resourcecollector/resourcecollector.go @@ -820,7 +820,7 @@ func (r *ResourceCollector) PrepareResourceForApply( } return true, nil case "PersistentVolume": - return r.preparePVResourceForApply(object, pvNameMappings, vInfo, storageClassMappings) + return r.preparePVResourceForApply(object, pvNameMappings, vInfo, storageClassMappings, namespaceMappings) case "PersistentVolumeClaim": return r.preparePVCResourceForApply(object, allObjects, pvNameMappings, storageClassMappings, vInfo) case "ClusterRoleBinding": From 87df6063b81db6dccb956848b2007bc08ef4e465 Mon 
Sep 17 00:00:00 2001 From: sivakumar subraani Date: Wed, 26 Oct 2022 08:24:28 +0000 Subject: [PATCH 52/97] ptx-13566: updating the destination namespace from the namespace mapping only when the namespace in SC is templatized. --- drivers/volume/portworx/portworx.go | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/drivers/volume/portworx/portworx.go b/drivers/volume/portworx/portworx.go index 1e066455ab..9bd73b5b24 100644 --- a/drivers/volume/portworx/portworx.go +++ b/drivers/volume/portworx/portworx.go @@ -2628,11 +2628,15 @@ func (p *portworx) UpdateMigratedPersistentVolumeSpec( } else { pv.Spec.CSI.ControllerExpandSecretRef.Name = val } - // In the case of portworx volume backup, we will have namespace mapping always. - // So no need to check for the template string as we are going to change the value to destination namespace always. - // If user does not change the namespace, the source and destination namespace in the mapping will be same. - if _, ok := sc.Parameters[controllerExpandSecretNamespace]; ok { + } + if val, ok := sc.Parameters[controllerExpandSecretNamespace]; ok { + if pv.Spec.CSI.ControllerExpandSecretRef == nil { + pv.Spec.CSI.ControllerExpandSecretRef = &v1.SecretReference{} + } + if val == templatizedNamespace { pv.Spec.CSI.ControllerExpandSecretRef.Namespace = dstNamespace + } else { + pv.Spec.CSI.ControllerExpandSecretRef.Namespace = val } } @@ -2646,10 +2650,18 @@ func (p *portworx) UpdateMigratedPersistentVolumeSpec( } else { pv.Spec.CSI.NodePublishSecretRef.Name = val } - if _, ok := sc.Parameters[nodePublishSecretNamespace]; ok { + } + if val, ok := sc.Parameters[nodePublishSecretNamespace]; ok { + if pv.Spec.CSI.NodePublishSecretRef == nil { + pv.Spec.CSI.NodePublishSecretRef = &v1.SecretReference{} + } + if val == templatizedNamespace { pv.Spec.CSI.NodePublishSecretRef.Namespace = dstNamespace + } else { + pv.Spec.CSI.NodePublishSecretRef.Namespace = val } } + // Update driver (provisioner) name pv.Spec.CSI.Driver = sc.Provisioner // In the case of csi, will set pv.Spec.portworxVolume to nil as we will have csi section now. From 65abc8cc4e80371ca3d437fee107190cf0293ada Mon Sep 17 00:00:00 2001 From: sivakumar subraani Date: Thu, 3 Nov 2022 06:21:47 +0000 Subject: [PATCH 53/97] Changes required in backuplocation CR definition of NFS type. 
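For illustration, a minimal Go sketch of the secret-merging behavior this patch adds (the NfsConfig fields and the serverAddr/subPath/mountOption keys match the diff below; mergeNfsConfig and the sample values are hypothetical, not part of Stork):

package main

import (
	"fmt"
	"strings"
)

// NfsConfig mirrors the struct this patch adds to the BackupLocation CR.
type NfsConfig struct {
	ServerAddr  string
	SubPath     string
	MountOption string
}

// mergeNfsConfig (hypothetical helper) overlays values found in a secret's
// data map onto an NfsConfig, trimming the trailing newline that secret
// values often carry, just as getMergedNfsConfig does in the diff below.
func mergeNfsConfig(cfg *NfsConfig, data map[string][]byte) {
	if val, ok := data["serverAddr"]; ok && val != nil {
		cfg.ServerAddr = strings.TrimSuffix(string(val), "\n")
	}
	if val, ok := data["subPath"]; ok && val != nil {
		cfg.SubPath = strings.TrimSuffix(string(val), "\n")
	}
	if val, ok := data["mountOption"]; ok && val != nil {
		cfg.MountOption = strings.TrimSuffix(string(val), "\n")
	}
}

func main() {
	cfg := &NfsConfig{}
	mergeNfsConfig(cfg, map[string][]byte{
		"serverAddr": []byte("10.0.0.5\n"),
		"subPath":    []byte("px-backups"),
	})
	fmt.Printf("%+v\n", *cfg) // {ServerAddr:10.0.0.5 SubPath:px-backups MountOption:}
}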
--- pkg/apis/stork/v1alpha1/backuplocation.go | 33 +++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/pkg/apis/stork/v1alpha1/backuplocation.go b/pkg/apis/stork/v1alpha1/backuplocation.go index 99e81d94af..06f0f6e6c4 100644 --- a/pkg/apis/stork/v1alpha1/backuplocation.go +++ b/pkg/apis/stork/v1alpha1/backuplocation.go @@ -41,6 +41,7 @@ type BackupLocationItem struct { S3Config *S3Config `json:"s3Config,omitempty"` AzureConfig *AzureConfig `json:"azureConfig,omitempty"` GoogleConfig *GoogleConfig `json:"googleConfig,omitempty"` + NfsConfig *NfsConfig `json:"nfsConfig,omitempty"` SecretConfig string `json:"secretConfig"` Sync bool `json:"sync"` RepositoryPassword string `json:"repositoryPassword"` @@ -73,6 +74,8 @@ const ( BackupLocationAzure BackupLocationType = "azure" // BackupLocationGoogle stores the backup in Google Cloud Storage BackupLocationGoogle BackupLocationType = "google" + // BackupLocationNFS stores the backup in NFS backed Storage + BackupLocationNFS BackupLocationType = "nfs" ) // ClusterType is the type of the cluster @@ -123,6 +126,12 @@ type GoogleConfig struct { AccountKey string `json:"accountKey"` } +type NfsConfig struct { + ServerAddr string `json:"serverAddr"` + SubPath string `json:"subPath"` + MountOption string `json:"mountOption"` +} + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // BackupLocationList is a list of ApplicationBackups @@ -154,6 +163,8 @@ func (bl *BackupLocation) UpdateFromSecret(client kubernetes.Interface) error { return bl.getMergedAzureConfig(client) case BackupLocationGoogle: return bl.getMergedGoogleConfig(client) + case BackupLocationNFS: + return bl.getMergedNfsConfig(client) default: return fmt.Errorf("Invalid BackupLocation type %v", bl.Location.Type) } @@ -176,6 +187,28 @@ func (bl *BackupLocation) UpdateFromClusterSecret(client kubernetes.Interface) e return nil } +func (bl *BackupLocation) getMergedNfsConfig(client kubernetes.Interface) error { + if bl.Location.NfsConfig == nil { + bl.Location.NfsConfig = &NfsConfig{} + } + if bl.Location.SecretConfig != "" { + secretConfig, err := client.CoreV1().Secrets(bl.Namespace).Get(context.TODO(), bl.Location.SecretConfig, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("error getting secretConfig for backupLocation: %v", err) + } + if val, ok := secretConfig.Data["serverAddr"]; ok && val != nil { + bl.Location.NfsConfig.ServerAddr = strings.TrimSuffix(string(val), "\n") + } + if val, ok := secretConfig.Data["subPath"]; ok && val != nil { + bl.Location.NfsConfig.SubPath = strings.TrimSuffix(string(val), "\n") + } + if val, ok := secretConfig.Data["mountOption"]; ok && val != nil { + bl.Location.NfsConfig.MountOption = strings.TrimSuffix(string(val), "\n") + } + } + return nil +} + func (bl *BackupLocation) getMergedS3Config(client kubernetes.Interface) error { if bl.Location.S3Config == nil { bl.Location.S3Config = &S3Config{} From 79207db3116055609eb9d42d271962fdbeba9750 Mon Sep 17 00:00:00 2001 From: sivakumar subraani Date: Sun, 6 Nov 2022 10:54:12 +0000 Subject: [PATCH 54/97] vendor kdmp from 1.2.3 branch and sched-ops from nfs-ea branch --- go.mod | 8 +- go.sum | 3 + .../kdmp/pkg/apis/kdmp/v1alpha1/register.go | 4 + .../pkg/apis/kdmp/v1alpha1/resourcebackup.go | 129 ++++++ .../pkg/apis/kdmp/v1alpha1/resourceexport.go | 152 +++++++ .../kdmp/v1alpha1/zz_generated.deepcopy.go | 387 +++++++++++++++++- .../kdmp/v1alpha1/generated_expansion.go | 4 + .../typed/kdmp/v1alpha1/kdmp_client.go | 10 + .../typed/kdmp/v1alpha1/resourcebackup.go | 
185 +++++++++ .../typed/kdmp/v1alpha1/resourceexport.go | 185 +++++++++ .../portworx/kdmp/pkg/controllers/common.go | 50 +++ .../pkg/controllers/dataexport/dataexport.go | 32 +- .../pkg/controllers/dataexport/reconcile.go | 206 ++++++---- .../portworx/kdmp/pkg/drivers/drivers.go | 14 + .../driversinstance/driversinstance.go | 6 + .../pkg/drivers/kopiabackup/kopiabackup.go | 53 ++- .../drivers/kopiabackup/kopiabackuplive.go | 45 +- .../pkg/drivers/kopiadelete/kopiadelete.go | 46 ++- .../kopiamaintenance/kopiamaintenance.go | 46 ++- .../pkg/drivers/kopiarestore/kopiarestore.go | 53 ++- .../kdmp/pkg/drivers/nfsbackup/nfsbackup.go | 277 +++++++++++++ .../kdmp/pkg/drivers/nfsdelete/nfsdelete.go | 253 ++++++++++++ .../kdmp/pkg/drivers/nfsrestore/nfsrestore.go | 296 ++++++++++++++ .../portworx/kdmp/pkg/drivers/options.go | 105 +++++ .../portworx/kdmp/pkg/drivers/utils/common.go | 93 ++++- .../portworx/kdmp/pkg/drivers/utils/utils.go | 379 ++++++++++++++++- .../kdmp/pkg/jobratelimit/jobratelimit.go | 4 + vendor/modules.txt | 10 +- 28 files changed, 2836 insertions(+), 199 deletions(-) create mode 100644 vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/resourcebackup.go create mode 100644 vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/resourceexport.go create mode 100644 vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/resourcebackup.go create mode 100644 vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/resourceexport.go create mode 100644 vendor/github.com/portworx/kdmp/pkg/controllers/common.go create mode 100644 vendor/github.com/portworx/kdmp/pkg/drivers/nfsbackup/nfsbackup.go create mode 100644 vendor/github.com/portworx/kdmp/pkg/drivers/nfsdelete/nfsdelete.go create mode 100644 vendor/github.com/portworx/kdmp/pkg/drivers/nfsrestore/nfsrestore.go diff --git a/go.mod b/go.mod index 20b333f8b1..726d357679 100644 --- a/go.mod +++ b/go.mod @@ -27,9 +27,9 @@ require ( github.com/openshift/api v0.0.0-20210105115604-44119421ec6b github.com/openshift/client-go v0.0.0-20210112165513-ebc401615f47 github.com/pborman/uuid v1.2.0 - github.com/portworx/kdmp v0.4.1-0.20220710173715-5d42efc7d149 + github.com/portworx/kdmp v0.4.1-0.20221106080928-1fd01d5e6ed0 github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987 - github.com/portworx/sched-ops v1.20.4-rc1.0.20220401024625-dbc61a336f65 + github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 github.com/portworx/torpedo v0.20.4-rc1.0.20210325154352-eb81b0cdd145 github.com/prometheus/client_golang v1.11.0 github.com/sirupsen/logrus v1.8.1 @@ -52,7 +52,7 @@ require ( k8s.io/apiserver v0.21.5 k8s.io/cli-runtime v0.21.5 k8s.io/client-go v12.0.0+incompatible - k8s.io/code-generator v0.21.5 + k8s.io/code-generator v0.22.1 k8s.io/component-helpers v0.24.0 k8s.io/kube-scheduler v0.0.0 k8s.io/kubectl v0.21.5 @@ -77,7 +77,7 @@ replace ( github.com/kubernetes-incubator/external-storage v0.20.4-openstorage-rc10 => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc10 github.com/libopenstorage/autopilot-api => github.com/libopenstorage/autopilot-api v0.6.1-0.20210301232050-ca2633c6e114 github.com/libopenstorage/openstorage => github.com/libopenstorage/openstorage v1.0.1-0.20220707215604-afbea03c04c5 - //github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20220824221759-f21d3c3b4496 + //github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 github.com/portworx/sched-ops 
=> github.com/portworx/sched-ops v1.20.4-rc1.0.20220922150029-c1d35df2436a github.com/portworx/torpedo => github.com/portworx/torpedo v0.0.0-20220714042817-25f6ab6dc5d1 gopkg.in/fsnotify.v1 v1.4.7 => github.com/fsnotify/fsnotify v1.4.7 diff --git a/go.sum b/go.sum index acf4bdfd9b..70aa859352 100644 --- a/go.sum +++ b/go.sum @@ -1136,6 +1136,7 @@ github.com/libopenstorage/stork v1.4.1-0.20211103064004-088d8fdeaa37/go.mod h1:I github.com/libopenstorage/stork v1.4.1-0.20211113171730-e02f28e240e9/go.mod h1:NTt7xK9DqWpXLEBJI4WEz/XTUG3EkW0zcqyOMO5Xp2w= github.com/libopenstorage/stork v1.4.1-0.20220323180113-0ea773109d05/go.mod h1:h+tscSChqPpry+lUHJYFqC+Gk0JY/qi6eCkUJYBo0wQ= github.com/libopenstorage/stork v1.4.1-0.20220414104250-3c18fd21ed95/go.mod h1:yE94X0xBFSBQ9LvvJ/zppc4+XeiCAXtsHfYHm15dlcA= +github.com/libopenstorage/stork v1.4.1-0.20221103082056-65abc8cc4e80/go.mod h1:yX+IlCrUsZekC6zxL6zHE7sBPKIudubHB3EcImzeRbI= github.com/libopenstorage/systemutils v0.0.0-20160208220149-44ac83be3ce1 h1:5vqfYYWm4b+lbkMtvvWtWBiqLbmLN6dNvWaa7wVsz/Q= github.com/libopenstorage/systemutils v0.0.0-20160208220149-44ac83be3ce1/go.mod h1:xwNGC7xiz/BQ/wbMkvHujL8Gjgseg+x41xMek7sKRRQ= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= @@ -1424,6 +1425,8 @@ github.com/portworx/kdmp v0.4.1-0.20220309093511-f7b925b9e53e/go.mod h1:RAXbeaO/ github.com/portworx/kdmp v0.4.1-0.20220414053457-962507678379/go.mod h1:EAVroITfYd50a0vi/ScAILl6h5RYJteuO/pg1y3vNNw= github.com/portworx/kdmp v0.4.1-0.20220710173715-5d42efc7d149 h1:KaRMV5hWbl7raiTFo20AZaXSIBBKCadzBmrXfwU+Id0= github.com/portworx/kdmp v0.4.1-0.20220710173715-5d42efc7d149/go.mod h1:nb5AupP/63ByyqAYfZ+E32LDEnP0PjgH6w+yKXxWIgE= +github.com/portworx/kdmp v0.4.1-0.20221106080928-1fd01d5e6ed0 h1:orOtVtS8VcmKiorxN0E83QrTpUFiCQ5OMVOJaqhivOk= +github.com/portworx/kdmp v0.4.1-0.20221106080928-1fd01d5e6ed0/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= github.com/portworx/kvdb v0.0.0-20190105022415-cccaa09abfc9/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200723230726-2734b7f40194/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200929023115-b312c7519467 h1:jkqzdbOnejgSN5HG/FLt4enNrozWT/K+nlmaRm3P1II= diff --git a/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/register.go b/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/register.go index 078420de43..d6cb8d39d9 100644 --- a/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/register.go +++ b/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/register.go @@ -38,6 +38,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &VolumeBackupDeleteList{}, &BackupLocationMaintenance{}, &BackupLocationMaintenanceList{}, + &ResourceExport{}, + &ResourceExportList{}, + &ResourceBackup{}, + &ResourceBackupList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/resourcebackup.go b/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/resourcebackup.go new file mode 100644 index 0000000000..eb24932be2 --- /dev/null +++ b/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/resourcebackup.go @@ -0,0 +1,129 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // ResourceBackupResourceName is name for the ResourceBackup resource. 
+ ResourceBackupResourceName = "resourcebackup" + // ResourceBackupResourcePlural is the name for list of ResourceBackup resources. + ResourceBackupResourcePlural = "resourcebackups" +) + +// ResourceBackupType defines a method of achieving Resource transfer. +type ResourceBackupType string + +// ResourceBackupStatus defines a status of ResourceBackup. +type ResourceBackupStatus string + +const ( + // ResourceBackupStatusInitial is the initial status of ResourceBackup. It indicates + // that a volume Backup request has been received. + ResourceBackupStatusInitial ResourceBackupStatus = "Initial" + // ResourceBackupStatusPending when Resource Backup is pending and not started yet. + ResourceBackupStatusPending ResourceBackupStatus = "Pending" + // ResourceBackupStatusInProgress when Resource is being transferred. + ResourceBackupStatusInProgress ResourceBackupStatus = "InProgress" + // ResourceBackupStatusFailed when Resource transfer is failed. + ResourceBackupStatusFailed ResourceBackupStatus = "Failed" + // ResourceBackupStatusSuccessful when Resource has been transferred. + ResourceBackupStatusSuccessful ResourceBackupStatus = "Successful" + // ResourceBackupStatusPartialSuccess when Resource was partially successful + ResourceBackupStatusPartialSuccess ResourceBackupStatus = "PartialSuccess" +) + +// ResourceBackupProgressStatus overall resource backup/restore progress +type ResourceBackupProgressStatus struct { + // Status status of resource export + Status ResourceBackupStatus `json:"status,omitempty"` + // Reason status reason + Reason string `json:"reason,omitempty"` + // Resources status of each resource being restore + Resources []*ResourceRestoreResourceInfo `json:"resources"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceBackup defines a spec for holding restore of resource status updated by NFS executor job +type ResourceBackup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ResourceBackupSpec `json:"spec"` + // Type - Backup or Restore + Type ResourceBackupType `json:"type,omitempty"` + // Status Overall status + Status ResourceBackupProgressStatus `json:"status,omitempty"` + // VolumesInfo Contains list of vols to be restored. Filled in by nfs executor job + VolumesInfo []*ResourceBackupVolumeInfo `json:"volumesInfo,omitempty"` + // ExistingVolumesInfo existing vols which are not be restored + ExistingVolumesInfo []*ResourceRestoreVolumeInfo `json:"existingVolumesInfo,omitempty"` +} + +// ResourceBackupSpec configuration parameters for ResourceBackup +type ResourceBackupSpec struct { + // ObjRef here is backuplocation CR + ObjRef ResourceBackupObjectReference `json:"source,omitempty"` + // PVC obj ref - During restore of vols store the ref of pvc + PVCObjRef ResourceBackupObjectReference `json:"pvcobj,omitempty"` +} + +// ResourceBackupObjectReference contains enough information to let you inspect the referred object. +type ResourceBackupObjectReference struct { + // API version of the referent. + APIVersion string `json:"apiVersion,omitempty"` + // Kind of the referent. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + Kind string `json:"kind,omitempty"` + // Namespace of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + Namespace string `json:"namespace,omitempty"` + // Name of the referent. 
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + Name string `json:"name,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceBackupList is a list of ResourceBackup resources. +type ResourceBackupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metaResource,omitempty"` + + Items []ResourceBackup `json:"items"` +} + +// ResourceBackupVolumeInfo is the info for the backup of a volume +type ResourceBackupVolumeInfo struct { + PersistentVolumeClaim string `json:"persistentVolumeClaim"` + PersistentVolumeClaimUID string `json:"persistentVolumeClaimUID"` + Namespace string `json:"namespace"` + Volume string `json:"volume"` + BackupID string `json:"backupID"` + DriverName string `json:"driverName"` + Zones []string `json:"zones"` + Status ResourceBackupStatus `json:"status"` + Reason string `json:"reason"` + Options map[string]string `json:"options"` + TotalSize uint64 `json:"totalSize"` + ActualSize uint64 `json:"actualSize"` + StorageClass string `json:"storageClass"` + Provisioner string `json:"provisioner"` + VolumeSnapshot string `json:"volumeSnapshot"` +} + +// ResourceRestoreVolumeInfo is the info for the restore of a volume +type ResourceRestoreVolumeInfo struct { + PersistentVolumeClaim string `json:"persistentVolumeClaim"` + PersistentVolumeClaimUID string `json:"persistentVolumeClaimUID"` + SourceNamespace string `json:"sourceNamespace"` + SourceVolume string `json:"sourceVolume"` + RestoreVolume string `json:"restoreVolume"` + DriverName string `json:"driverName"` + Zones []string `json:"zones"` + Status ResourceBackupStatus `json:"status"` + Reason string `json:"reason"` + TotalSize uint64 `json:"totalSize"` + Options map[string]string `json:"options"` +} diff --git a/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/resourceexport.go b/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/resourceexport.go new file mode 100644 index 0000000000..3d9ac555f2 --- /dev/null +++ b/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/resourceexport.go @@ -0,0 +1,152 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // ResourceExportResourceName is name for the ResourceExport resource. + ResourceExportResourceName = "resourceexport" + // ResourceExportResourcePlural is the name for list of ResourceExport resources. + ResourceExportResourcePlural = "resourceexports" + // ResourceExportNFS resource export provided by nfs path + ResourceExportNFS ResourceExportType = "nfs" +) + +// ResourceExportType defines a method of achieving Resource transfer. +type ResourceExportType string + +// ResourceExportStatus defines a status of ResourceExport. +type ResourceExportStatus string + +// ResourceExportStage is the stage of the ResourceExport +type ResourceExportStage string + +// ResourceRestoreStatus defines the status of Resource after applying the spec during restore. 
+type ResourceRestoreStatus string + +// ObjectInfo contains info about an object being backed up or restored +type ObjectInfo struct { + Name string `json:"name"` + Namespace string `json:"namespace"` + metav1.GroupVersionKind `json:",inline"` +} + +// ResourceRestoreResourceInfo is the info for the restore of a resource +type ResourceRestoreResourceInfo struct { + ObjectInfo `json:",inline"` + Status ResourceRestoreStatus `json:"status"` + Reason string `json:"reason"` +} + +const ( + // ResourceRestoreStatusFailed Restore Failed + ResourceRestoreStatusFailed ResourceRestoreStatus = "Failed" + // ResourceRestoreStatusRetained Restore Retained + ResourceRestoreStatusRetained ResourceRestoreStatus = "Retained" + // ResourceRestoreStatusSuccessful Restore Successful + ResourceRestoreStatusSuccessful ResourceRestoreStatus = "Successful" + // ResourceRestoreStatusInProgress Restore InProgress + ResourceRestoreStatusInProgress ResourceRestoreStatus = "InProgress" +) + +const ( + // ResourceExportStatusInitial is the initial status of ResourceExport. It indicates + // that a volume export request has been received. + ResourceExportStatusInitial ResourceExportStatus = "Initial" + // ResourceExportStatusPending when Resource export is pending and not started yet. + ResourceExportStatusPending ResourceExportStatus = "Pending" + // ResourceExportStatusInProgress when Resource is being transferred. + ResourceExportStatusInProgress ResourceExportStatus = "InProgress" + // ResourceExportStatusFailed when Resource transfer is failed. + ResourceExportStatusFailed ResourceExportStatus = "Failed" + // ResourceExportStatusSuccessful when Resource has been transferred. + ResourceExportStatusSuccessful ResourceExportStatus = "Successful" +) + +const ( + // ResourceExportBackup backup op for resource upload + ResourceExportBackup ResourceExportType = "nfs" +) + +const ( + // ResourceExportStageInitial is the initial stage for ResourceExport + ResourceExportStageInitial ResourceExportStage = "Initial" + // ResourceExportStageInProgress is the InProgress stage for ResourceExport + ResourceExportStageInProgress ResourceExportStage = "InProgress" + // ResourceExportStageFailed is the Failed stage for ResourceExport + ResourceExportStageFailed ResourceExportStage = "Failed" + // ResourceExportStageSuccessful is the Successful stage for ResourceExport + ResourceExportStageSuccessful ResourceExportStage = "Successful" + // ResourceExportStageFinal is the Final stage for ResourceExport + ResourceExportStageFinal ResourceExportStage = "Final" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceExport defines a spec for restoring resources to NFS target +type ResourceExport struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ResourceExportSpec `json:"spec"` + // Status Overall status + Status ResourceStatus `json:"status,omitempty"` + // VolumesInfo Contains list of vols to be restored. Filled in by nfs executor job + VolumesInfo []*ResourceBackupVolumeInfo `json:"volumesInfo"` + // ExistingVolumesInfo existing vols which are not be restored + ExistingVolumesInfo []*ResourceRestoreVolumeInfo `json:"existingVolumesInfo,omitempty"` +} + +// ResourceExportSpec configuration parameters for ResourceExport +type ResourceExportSpec struct { + // Type - Backup or Restore + Type ResourceExportType `json:"type,omitempty"` + // TriggeredFrom is to know which module is created the resourceExport CR. 
+ // The intention is to know where to fetch the nfs executor image from
+ TriggeredFrom string `json:"triggerFrom,omitempty"`
+ TriggeredFromNs string `json:"triggerFromNs,omitempty"`
+ // Source here is the applicationBackup CR for backup
+ Source ResourceExportObjectReference `json:"source,omitempty"`
+ // Destination is the ref to the BackupLocation (BL) CR
+ Destination ResourceExportObjectReference `json:"destination,omitempty"`
+}
+
+// ResourceStatus overall resource backup/restore progress
+type ResourceStatus struct {
+ // Status status of the resource export
+ Status ResourceExportStatus `json:"status,omitempty"`
+ // Reason status reason
+ Reason string `json:"reason,omitempty"`
+ // TransferID job transfer ID
+ TransferID string `json:"transferID,omitempty"`
+ // Stage resource export stage
+ Stage ResourceExportStage `json:"stage,omitempty"`
+ // Resources status of each resource being restored
+ Resources []*ResourceRestoreResourceInfo `json:"resources"`
+}
+
+// ResourceExportObjectReference contains enough information to let you inspect the referred object.
+type ResourceExportObjectReference struct {
+ // API version of the referent.
+ APIVersion string `json:"apiVersion,omitempty"`
+ // Kind of the referent.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ Kind string `json:"kind,omitempty"`
+ // Namespace of the referent.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ Namespace string `json:"namespace,omitempty"`
+ // Name of the referent.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ Name string `json:"name,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ResourceExportList is a list of ResourceExport resources.
+type ResourceExportList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metaResource,omitempty"`
+
+ Items []ResourceExport `json:"items"`
+}
diff --git a/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/zz_generated.deepcopy.go
index 357b1784f2..23d1027b30 100644
--- a/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/zz_generated.deepcopy.go
+++ b/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/zz_generated.deepcopy.go
@@ -1,3 +1,4 @@
+//go:build !ignore_autogenerated
 // +build !ignore_autogenerated
 
 /*
@@ -11,6 +12,7 @@ LICENSE
 package v1alpha1
 
 import (
+ v1 "k8s.io/api/core/v1"
 runtime "k8s.io/apimachinery/pkg/runtime"
 )
 
@@ -127,7 +129,7 @@ func (in *DataExport) DeepCopyInto(out *DataExport) {
 out.TypeMeta = in.TypeMeta
 in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 out.Spec = in.Spec
- out.Status = in.Status
+ in.Status.DeepCopyInto(&out.Status)
 return
 }
 
@@ -219,6 +221,11 @@ func (in *DataExportSpec) DeepCopy() *DataExportSpec {
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ExportStatus) DeepCopyInto(out *ExportStatus) {
 *out = *in
+ if in.RestorePVC != nil {
+ in, out := &in.RestorePVC, &out.RestorePVC
+ *out = new(v1.PersistentVolumeClaim)
+ (*in).DeepCopyInto(*out)
+ }
 return
 }
 
@@ -232,6 +239,23 @@ func (in *ExportStatus) DeepCopy() *ExportStatus {
 return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
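The DataExport hunk above is the behaviorally significant change in this generated file: out.Status = in.Status becomes in.Status.DeepCopyInto(&out.Status) because ExportStatus now carries a *v1.PersistentVolumeClaim in RestorePVC, and a plain struct assignment would copy the pointer rather than the claim. A minimal standalone sketch of the aliasing bug a shallow copy would cause (the exportStatus type below is an illustrative stand-in, not the vendored type):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// exportStatus mimics the shape of the changed field: a struct holding a pointer.
type exportStatus struct {
	RestorePVC *v1.PersistentVolumeClaim
}

func main() {
	orig := exportStatus{RestorePVC: &v1.PersistentVolumeClaim{}}
	orig.RestorePVC.Name = "restore-pvc"

	// Shallow copy (the old out.Status = in.Status): both values share one PVC.
	shallow := orig
	shallow.RestorePVC.Name = "mutated"
	fmt.Println(orig.RestorePVC.Name) // "mutated" -- the source object changed too

	// Deep copy (what the generated DeepCopyInto now does): the PVC is cloned.
	deep := exportStatus{RestorePVC: orig.RestorePVC.DeepCopy()}
	deep.RestorePVC.Name = "independent"
	fmt.Println(orig.RestorePVC.Name) // still "mutated" -- unaffected
}

The DeepCopy/DeepCopyInto/DeepCopyObject methods that continue below follow the standard deepcopy-gen pattern for the new types.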
+func (in *ObjectInfo) DeepCopyInto(out *ObjectInfo) { + *out = *in + out.GroupVersionKind = in.GroupVersionKind + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectInfo. +func (in *ObjectInfo) DeepCopy() *ObjectInfo { + if in == nil { + return nil + } + out := new(ObjectInfo) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RepoMaintenanceStatus) DeepCopyInto(out *RepoMaintenanceStatus) { *out = *in @@ -249,6 +273,367 @@ func (in *RepoMaintenanceStatus) DeepCopy() *RepoMaintenanceStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceBackup) DeepCopyInto(out *ResourceBackup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + if in.VolumesInfo != nil { + in, out := &in.VolumesInfo, &out.VolumesInfo + *out = make([]*ResourceBackupVolumeInfo, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ResourceBackupVolumeInfo) + (*in).DeepCopyInto(*out) + } + } + } + if in.ExistingVolumesInfo != nil { + in, out := &in.ExistingVolumesInfo, &out.ExistingVolumesInfo + *out = make([]*ResourceRestoreVolumeInfo, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ResourceRestoreVolumeInfo) + (*in).DeepCopyInto(*out) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceBackup. +func (in *ResourceBackup) DeepCopy() *ResourceBackup { + if in == nil { + return nil + } + out := new(ResourceBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceBackup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceBackupList) DeepCopyInto(out *ResourceBackupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceBackup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceBackupList. +func (in *ResourceBackupList) DeepCopy() *ResourceBackupList { + if in == nil { + return nil + } + out := new(ResourceBackupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceBackupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceBackupObjectReference) DeepCopyInto(out *ResourceBackupObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceBackupObjectReference. 
+func (in *ResourceBackupObjectReference) DeepCopy() *ResourceBackupObjectReference { + if in == nil { + return nil + } + out := new(ResourceBackupObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceBackupProgressStatus) DeepCopyInto(out *ResourceBackupProgressStatus) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]*ResourceRestoreResourceInfo, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ResourceRestoreResourceInfo) + **out = **in + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceBackupProgressStatus. +func (in *ResourceBackupProgressStatus) DeepCopy() *ResourceBackupProgressStatus { + if in == nil { + return nil + } + out := new(ResourceBackupProgressStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceBackupSpec) DeepCopyInto(out *ResourceBackupSpec) { + *out = *in + out.ObjRef = in.ObjRef + out.PVCObjRef = in.PVCObjRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceBackupSpec. +func (in *ResourceBackupSpec) DeepCopy() *ResourceBackupSpec { + if in == nil { + return nil + } + out := new(ResourceBackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceBackupVolumeInfo) DeepCopyInto(out *ResourceBackupVolumeInfo) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceBackupVolumeInfo. +func (in *ResourceBackupVolumeInfo) DeepCopy() *ResourceBackupVolumeInfo { + if in == nil { + return nil + } + out := new(ResourceBackupVolumeInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceExport) DeepCopyInto(out *ResourceExport) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + if in.VolumesInfo != nil { + in, out := &in.VolumesInfo, &out.VolumesInfo + *out = make([]*ResourceBackupVolumeInfo, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ResourceBackupVolumeInfo) + (*in).DeepCopyInto(*out) + } + } + } + if in.ExistingVolumesInfo != nil { + in, out := &in.ExistingVolumesInfo, &out.ExistingVolumesInfo + *out = make([]*ResourceRestoreVolumeInfo, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ResourceRestoreVolumeInfo) + (*in).DeepCopyInto(*out) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceExport. 
+func (in *ResourceExport) DeepCopy() *ResourceExport { + if in == nil { + return nil + } + out := new(ResourceExport) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceExport) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceExportList) DeepCopyInto(out *ResourceExportList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceExport, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceExportList. +func (in *ResourceExportList) DeepCopy() *ResourceExportList { + if in == nil { + return nil + } + out := new(ResourceExportList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceExportList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceExportObjectReference) DeepCopyInto(out *ResourceExportObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceExportObjectReference. +func (in *ResourceExportObjectReference) DeepCopy() *ResourceExportObjectReference { + if in == nil { + return nil + } + out := new(ResourceExportObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceExportSpec) DeepCopyInto(out *ResourceExportSpec) { + *out = *in + out.Source = in.Source + out.Destination = in.Destination + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceExportSpec. +func (in *ResourceExportSpec) DeepCopy() *ResourceExportSpec { + if in == nil { + return nil + } + out := new(ResourceExportSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceRestoreResourceInfo) DeepCopyInto(out *ResourceRestoreResourceInfo) { + *out = *in + out.ObjectInfo = in.ObjectInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRestoreResourceInfo. +func (in *ResourceRestoreResourceInfo) DeepCopy() *ResourceRestoreResourceInfo { + if in == nil { + return nil + } + out := new(ResourceRestoreResourceInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceRestoreVolumeInfo) DeepCopyInto(out *ResourceRestoreVolumeInfo) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRestoreVolumeInfo. +func (in *ResourceRestoreVolumeInfo) DeepCopy() *ResourceRestoreVolumeInfo { + if in == nil { + return nil + } + out := new(ResourceRestoreVolumeInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceStatus) DeepCopyInto(out *ResourceStatus) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]*ResourceRestoreResourceInfo, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ResourceRestoreResourceInfo) + **out = **in + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceStatus. +func (in *ResourceStatus) DeepCopy() *ResourceStatus { + if in == nil { + return nil + } + out := new(ResourceStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeBackup) DeepCopyInto(out *VolumeBackup) { *out = *in diff --git a/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/generated_expansion.go b/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/generated_expansion.go index c3dfe990e3..653ba63330 100644 --- a/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/generated_expansion.go +++ b/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/generated_expansion.go @@ -12,6 +12,10 @@ type BackupLocationMaintenanceExpansion interface{} type DataExportExpansion interface{} +type ResourceBackupExpansion interface{} + +type ResourceExportExpansion interface{} + type VolumeBackupExpansion interface{} type VolumeBackupDeleteExpansion interface{} diff --git a/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/kdmp_client.go b/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/kdmp_client.go index a1421c000d..83c5912c2b 100644 --- a/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/kdmp_client.go +++ b/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/kdmp_client.go @@ -18,6 +18,8 @@ type KdmpV1alpha1Interface interface { RESTClient() rest.Interface BackupLocationMaintenancesGetter DataExportsGetter + ResourceBackupsGetter + ResourceExportsGetter VolumeBackupsGetter VolumeBackupDeletesGetter } @@ -35,6 +37,14 @@ func (c *KdmpV1alpha1Client) DataExports(namespace string) DataExportInterface { return newDataExports(c, namespace) } +func (c *KdmpV1alpha1Client) ResourceBackups(namespace string) ResourceBackupInterface { + return newResourceBackups(c, namespace) +} + +func (c *KdmpV1alpha1Client) ResourceExports(namespace string) ResourceExportInterface { + return newResourceExports(c, namespace) +} + func (c *KdmpV1alpha1Client) VolumeBackups(namespace 
string) VolumeBackupInterface { return newVolumeBackups(c, namespace) } diff --git a/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/resourcebackup.go b/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/resourcebackup.go new file mode 100644 index 0000000000..5d26d37417 --- /dev/null +++ b/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/resourcebackup.go @@ -0,0 +1,185 @@ +/* + +LICENSE + +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1" + scheme "github.com/portworx/kdmp/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ResourceBackupsGetter has a method to return a ResourceBackupInterface. +// A group's client should implement this interface. +type ResourceBackupsGetter interface { + ResourceBackups(namespace string) ResourceBackupInterface +} + +// ResourceBackupInterface has methods to work with ResourceBackup resources. +type ResourceBackupInterface interface { + Create(ctx context.Context, resourceBackup *v1alpha1.ResourceBackup, opts v1.CreateOptions) (*v1alpha1.ResourceBackup, error) + Update(ctx context.Context, resourceBackup *v1alpha1.ResourceBackup, opts v1.UpdateOptions) (*v1alpha1.ResourceBackup, error) + UpdateStatus(ctx context.Context, resourceBackup *v1alpha1.ResourceBackup, opts v1.UpdateOptions) (*v1alpha1.ResourceBackup, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ResourceBackup, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ResourceBackupList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceBackup, err error) + ResourceBackupExpansion +} + +// resourceBackups implements ResourceBackupInterface +type resourceBackups struct { + client rest.Interface + ns string +} + +// newResourceBackups returns a ResourceBackups +func newResourceBackups(c *KdmpV1alpha1Client, namespace string) *resourceBackups { + return &resourceBackups{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the resourceBackup, and returns the corresponding resourceBackup object, and an error if there is any. +func (c *resourceBackups) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceBackup, err error) { + result = &v1alpha1.ResourceBackup{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourcebackups"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ResourceBackups that match those selectors. +func (c *resourceBackups) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceBackupList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ResourceBackupList{} + err = c.client.Get(). + Namespace(c.ns). 
+ Resource("resourcebackups"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resourceBackups. +func (c *resourceBackups) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("resourcebackups"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a resourceBackup and creates it. Returns the server's representation of the resourceBackup, and an error, if there is any. +func (c *resourceBackups) Create(ctx context.Context, resourceBackup *v1alpha1.ResourceBackup, opts v1.CreateOptions) (result *v1alpha1.ResourceBackup, err error) { + result = &v1alpha1.ResourceBackup{} + err = c.client.Post(). + Namespace(c.ns). + Resource("resourcebackups"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceBackup). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a resourceBackup and updates it. Returns the server's representation of the resourceBackup, and an error, if there is any. +func (c *resourceBackups) Update(ctx context.Context, resourceBackup *v1alpha1.ResourceBackup, opts v1.UpdateOptions) (result *v1alpha1.ResourceBackup, err error) { + result = &v1alpha1.ResourceBackup{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourcebackups"). + Name(resourceBackup.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceBackup). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *resourceBackups) UpdateStatus(ctx context.Context, resourceBackup *v1alpha1.ResourceBackup, opts v1.UpdateOptions) (result *v1alpha1.ResourceBackup, err error) { + result = &v1alpha1.ResourceBackup{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourcebackups"). + Name(resourceBackup.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceBackup). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the resourceBackup and deletes it. Returns an error if one occurs. +func (c *resourceBackups) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("resourcebackups"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *resourceBackups) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("resourcebackups"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched resourceBackup. +func (c *resourceBackups) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceBackup, err error) { + result = &v1alpha1.ResourceBackup{} + err = c.client.Patch(pt). + Namespace(c.ns). 
+ Resource("resourcebackups"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/resourceexport.go b/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/resourceexport.go new file mode 100644 index 0000000000..7d51a89566 --- /dev/null +++ b/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/resourceexport.go @@ -0,0 +1,185 @@ +/* + +LICENSE + +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1" + scheme "github.com/portworx/kdmp/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ResourceExportsGetter has a method to return a ResourceExportInterface. +// A group's client should implement this interface. +type ResourceExportsGetter interface { + ResourceExports(namespace string) ResourceExportInterface +} + +// ResourceExportInterface has methods to work with ResourceExport resources. +type ResourceExportInterface interface { + Create(ctx context.Context, resourceExport *v1alpha1.ResourceExport, opts v1.CreateOptions) (*v1alpha1.ResourceExport, error) + Update(ctx context.Context, resourceExport *v1alpha1.ResourceExport, opts v1.UpdateOptions) (*v1alpha1.ResourceExport, error) + UpdateStatus(ctx context.Context, resourceExport *v1alpha1.ResourceExport, opts v1.UpdateOptions) (*v1alpha1.ResourceExport, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ResourceExport, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ResourceExportList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceExport, err error) + ResourceExportExpansion +} + +// resourceExports implements ResourceExportInterface +type resourceExports struct { + client rest.Interface + ns string +} + +// newResourceExports returns a ResourceExports +func newResourceExports(c *KdmpV1alpha1Client, namespace string) *resourceExports { + return &resourceExports{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the resourceExport, and returns the corresponding resourceExport object, and an error if there is any. +func (c *resourceExports) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceExport, err error) { + result = &v1alpha1.ResourceExport{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourceexports"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ResourceExports that match those selectors. 
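Both generated clients funnel every method through the same REST verbs against the "resourcebackups" and "resourceexports" resources, so from a consumer's point of view the clientset behaves like any other typed Kubernetes client. A hedged usage sketch (the kubeconfig path, namespace, and object names are placeholders, and the spec is deliberately minimal):

package main

import (
	"context"
	"fmt"

	kdmpv1alpha1 "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1"
	kdmpclient "github.com/portworx/kdmp/pkg/client/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Out-of-cluster config; the path is a placeholder.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kdmpclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Create a minimal ResourceExport and read it back.
	re := &kdmpv1alpha1.ResourceExport{
		ObjectMeta: metav1.ObjectMeta{Name: "demo-export", Namespace: "default"},
		Spec:       kdmpv1alpha1.ResourceExportSpec{Type: kdmpv1alpha1.ResourceExportNFS},
	}
	ctx := context.TODO()
	if _, err := cs.KdmpV1alpha1().ResourceExports("default").Create(ctx, re, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
	got, err := cs.KdmpV1alpha1().ResourceExports("default").Get(ctx, "demo-export", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(got.Name, got.Spec.Type)
}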
+func (c *resourceExports) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceExportList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ResourceExportList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourceexports"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resourceExports. +func (c *resourceExports) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("resourceexports"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a resourceExport and creates it. Returns the server's representation of the resourceExport, and an error, if there is any. +func (c *resourceExports) Create(ctx context.Context, resourceExport *v1alpha1.ResourceExport, opts v1.CreateOptions) (result *v1alpha1.ResourceExport, err error) { + result = &v1alpha1.ResourceExport{} + err = c.client.Post(). + Namespace(c.ns). + Resource("resourceexports"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceExport). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a resourceExport and updates it. Returns the server's representation of the resourceExport, and an error, if there is any. +func (c *resourceExports) Update(ctx context.Context, resourceExport *v1alpha1.ResourceExport, opts v1.UpdateOptions) (result *v1alpha1.ResourceExport, err error) { + result = &v1alpha1.ResourceExport{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourceexports"). + Name(resourceExport.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceExport). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *resourceExports) UpdateStatus(ctx context.Context, resourceExport *v1alpha1.ResourceExport, opts v1.UpdateOptions) (result *v1alpha1.ResourceExport, err error) { + result = &v1alpha1.ResourceExport{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourceexports"). + Name(resourceExport.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceExport). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the resourceExport and deletes it. Returns an error if one occurs. +func (c *resourceExports) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("resourceexports"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *resourceExports) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("resourceexports"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). 
+ Error() +} + +// Patch applies the patch and returns the patched resourceExport. +func (c *resourceExports) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceExport, err error) { + result = &v1alpha1.ResourceExport{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("resourceexports"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/portworx/kdmp/pkg/controllers/common.go b/vendor/github.com/portworx/kdmp/pkg/controllers/common.go new file mode 100644 index 0000000000..a3fc3d4a7b --- /dev/null +++ b/vendor/github.com/portworx/kdmp/pkg/controllers/common.go @@ -0,0 +1,50 @@ +package controllers + +import ( + "os" + "time" + + storkapi "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" + "github.com/portworx/sched-ops/k8s/stork" + "k8s.io/apimachinery/pkg/util/yaml" +) + +var ( + // ResyncPeriod controller resync period + ResyncPeriod = 10 * time.Second + // RequeuePeriod controller requeue period + RequeuePeriod = 5 * time.Second + // ValidateCRDInterval CRD validation interval + ValidateCRDInterval time.Duration = 10 * time.Second + // ValidateCRDTimeout CRD validation timeout + ValidateCRDTimeout time.Duration = 2 * time.Minute + // CleanupFinalizer cleanup finalizer + CleanupFinalizer = "kdmp.portworx.com/finalizer-cleanup" + // TaskDefaultTimeout timeout for retry task + TaskDefaultTimeout = 1 * time.Minute + // TaskProgressCheckInterval to check task progress at specified interval + TaskProgressCheckInterval = 5 * time.Second +) + +// ReadBackupLocation fetching backuplocation CR +func ReadBackupLocation(name, namespace, filePath string) (*storkapi.BackupLocation, error) { + if name != "" { + if namespace == "" { + namespace = "default" + } + return stork.Instance().GetBackupLocation(name, namespace) + } + + // TODO: This is needed for restic, we can think of removing it later + f, err := os.Open(filePath) + if err != nil { + return nil, err + } + + out := &storkapi.BackupLocation{} + if err = yaml.NewYAMLOrJSONDecoder(f, 1024).Decode(out); err != nil { + return nil, err + } + + return out, nil +} diff --git a/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/dataexport.go b/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/dataexport.go index e60315078c..1ad93b2e66 100644 --- a/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/dataexport.go +++ b/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/dataexport.go @@ -3,11 +3,11 @@ package dataexport import ( "context" "reflect" - "time" "github.com/libopenstorage/stork/pkg/controllers" "github.com/libopenstorage/stork/pkg/snapshotter" kdmpapi "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1" + kdmpcontroller "github.com/portworx/kdmp/pkg/controllers" "github.com/portworx/kdmp/pkg/utils" "github.com/portworx/kdmp/pkg/version" "github.com/portworx/sched-ops/k8s/apiextensions" @@ -23,15 +23,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" ) -var ( - resyncPeriod = 10 * time.Second - requeuePeriod = 5 * time.Second - validateCRDInterval time.Duration = 10 * time.Second - validateCRDTimeout time.Duration = 2 * time.Minute - - cleanupFinalizer = "kdmp.portworx.com/finalizer-cleanup" -) - // Controller is a k8s controller that handles DataExport resources. 
type Controller struct { client runtimeclient.Client @@ -71,7 +62,6 @@ func (c *Controller) Init(mgr manager.Manager) error { // // The Controller will requeue the Request to be processed again if the returned error is non-nil or // Result.Requeue is true, otherwise upon completion it will remove the work from the queue. -// func (c *Controller) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { logrus.Tracef("Reconciling DataExport %s/%s", request.Namespace, request.Name) @@ -84,24 +74,24 @@ func (c *Controller) Reconcile(ctx context.Context, request reconcile.Request) ( return reconcile.Result{}, nil } // Error reading the object - requeue the request. - return reconcile.Result{RequeueAfter: requeuePeriod}, nil + return reconcile.Result{RequeueAfter: kdmpcontroller.RequeuePeriod}, nil } - if !controllers.ContainsFinalizer(dataExport, cleanupFinalizer) { - controllers.SetFinalizer(dataExport, cleanupFinalizer) + if !controllers.ContainsFinalizer(dataExport, kdmpcontroller.CleanupFinalizer) { + controllers.SetFinalizer(dataExport, kdmpcontroller.CleanupFinalizer) return reconcile.Result{Requeue: true}, c.client.Update(context.TODO(), dataExport) } requeue, err := c.sync(context.TODO(), dataExport) if err != nil { logrus.Errorf("kdmp controller: %s/%s: %s", request.Namespace, request.Name, err) - return reconcile.Result{RequeueAfter: requeuePeriod}, nil + return reconcile.Result{RequeueAfter: kdmpcontroller.RequeuePeriod}, nil } if requeue { - return reconcile.Result{RequeueAfter: requeuePeriod}, nil + return reconcile.Result{RequeueAfter: kdmpcontroller.RequeuePeriod}, nil } - return reconcile.Result{RequeueAfter: resyncPeriod}, nil + return reconcile.Result{RequeueAfter: kdmpcontroller.ResyncPeriod}, nil } func (c *Controller) createCRD() error { @@ -124,7 +114,7 @@ func (c *Controller) createCRD() error { if err != nil && !errors.IsAlreadyExists(err) { return err } - if err := apiextensions.Instance().ValidateCRD(vb.Plural+"."+vb.Group, validateCRDTimeout, validateCRDInterval); err != nil { + if err := apiextensions.Instance().ValidateCRD(vb.Plural+"."+vb.Group, kdmpcontroller.ValidateCRDTimeout, kdmpcontroller.ValidateCRDInterval); err != nil { return err } } else { @@ -132,7 +122,7 @@ func (c *Controller) createCRD() error { if err != nil && !errors.IsAlreadyExists(err) { return err } - if err := apiextensions.Instance().ValidateCRDV1beta1(vb, validateCRDTimeout, validateCRDInterval); err != nil { + if err := apiextensions.Instance().ValidateCRDV1beta1(vb, kdmpcontroller.ValidateCRDTimeout, kdmpcontroller.ValidateCRDInterval); err != nil { return err } } @@ -151,7 +141,7 @@ func (c *Controller) createCRD() error { if err != nil && !errors.IsAlreadyExists(err) { return err } - if err := apiextensions.Instance().ValidateCRD(resource.Plural+"."+vb.Group, validateCRDTimeout, validateCRDInterval); err != nil { + if err := apiextensions.Instance().ValidateCRD(resource.Plural+"."+vb.Group, kdmpcontroller.ValidateCRDTimeout, kdmpcontroller.ValidateCRDInterval); err != nil { return err } } else { @@ -159,7 +149,7 @@ func (c *Controller) createCRD() error { if err != nil && !errors.IsAlreadyExists(err) { return err } - if err := apiextensions.Instance().ValidateCRDV1beta1(resource, validateCRDTimeout, validateCRDInterval); err != nil { + if err := apiextensions.Instance().ValidateCRDV1beta1(resource, kdmpcontroller.ValidateCRDTimeout, kdmpcontroller.ValidateCRDInterval); err != nil { return err } } diff --git 
a/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go b/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go
index f588c013d4..9e5fb09233 100644
--- a/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go
+++ b/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go
@@ -18,10 +18,12 @@ import (
 "github.com/libopenstorage/stork/pkg/controllers"
 "github.com/libopenstorage/stork/pkg/snapshotter"
 kdmpapi "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1"
+ kdmpcontroller "github.com/portworx/kdmp/pkg/controllers"
 "github.com/portworx/kdmp/pkg/drivers"
 "github.com/portworx/kdmp/pkg/drivers/driversinstance"
 "github.com/portworx/kdmp/pkg/drivers/utils"
 kdmpopts "github.com/portworx/kdmp/pkg/util/ops"
+ "github.com/portworx/kdmp/pkg/version"
 "github.com/portworx/sched-ops/k8s/batch"
 "github.com/portworx/sched-ops/k8s/core"
@@ -35,7 +37,6 @@ import (
 k8sErrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
- "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/apimachinery/pkg/util/yaml"
 "k8s.io/client-go/rest"
 k8shelper "k8s.io/component-helpers/storage/volume"
@@ -72,9 +73,7 @@ const (
 // pvcNameLenLimitForJob is the max length of PVC name that the bound job
 // will incorporate in their names
 pvcNameLenLimitForJob = 48
- volumeinitialDelay = 2 * time.Second
- volumeFactor = 1.5
- volumeSteps = 15
+
 defaultTimeout = 1 * time.Minute
 progressCheckInterval = 5 * time.Second
 compressionKey = "KDMP_COMPRESSION"
@@ -96,12 +95,6 @@ type updateDataExportDetail struct {
 volumeSnapshot string
 }
 
-var volumeAPICallBackoff = wait.Backoff{
- Duration: volumeinitialDelay,
- Factor: volumeFactor,
- Steps: volumeSteps,
-}
-
 func (c *Controller) sync(ctx context.Context, in *kdmpapi.DataExport) (bool, error) {
 if in == nil {
 return false, nil
@@ -123,7 +116,7 @@ func (c *Controller) sync(ctx context.Context, in *kdmpapi.DataExport) (bool, er
 
 // delete an object on the init stage without cleanup
 if dataExport.DeletionTimestamp != nil && dataExport.Status.Stage == kdmpapi.DataExportStageInitial {
- if !controllers.ContainsFinalizer(dataExport, cleanupFinalizer) {
+ if !controllers.ContainsFinalizer(dataExport, kdmpcontroller.CleanupFinalizer) {
 return false, nil
 }
 
@@ -158,7 +151,7 @@ func (c *Controller) sync(ctx context.Context, in *kdmpapi.DataExport) (bool, er
 }
 
 if dataExport.DeletionTimestamp != nil {
- if !controllers.ContainsFinalizer(dataExport, cleanupFinalizer) {
+ if !controllers.ContainsFinalizer(dataExport, kdmpcontroller.CleanupFinalizer) {
 return false, nil
 }
 if err = c.cleanUp(driver, dataExport); err != nil {
@@ -232,15 +225,23 @@ func (c *Controller) sync(ctx context.Context, in *kdmpapi.DataExport) (bool, er
 
 // Create the pvc from the spec provided in the dataexport CR
 pvcSpec := dataExport.Status.RestorePVC
- _, err = c.createPVC(dataExport)
+ // For NFS, PVC creation happens upfront, so createPVC() fails internally during volume restore:
+ // the PVC ref in the DataExport CR carries only the PVC name and namespace, not the full spec,
+ // which is expected since the PVC already exists. Hence the extra existence check first.
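One removal in this file is easy to skim past: the hand-rolled volumeAPICallBackoff (2s initial delay, factor 1.5, 15 steps) is gone, with PVC readiness checks now delegated to utils.WaitForPVCBound. For a sense of what the old policy allowed, the cumulative worst-case wait can be recomputed in a few lines (assuming no jitter or cap, which matches the removed wait.Backoff literal):

package main

import "fmt"

func main() {
	// Reconstructs the worst-case cumulative sleep of the removed
	// volumeAPICallBackoff: duration 2s, factor 1.5, 15 steps.
	total, step := 0.0, 2.0
	for i := 0; i < 15; i++ {
		total += step
		step *= 1.5
	}
	fmt.Printf("max cumulative wait: %.0fs (~%.0f min)\n", total, total/60) // ~1748s, ~29 min
}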
+ _, err = core.Instance().GetPersistentVolumeClaim(pvcSpec.Name, pvcSpec.Namespace)
 if err != nil {
- msg := fmt.Sprintf("Error creating pvc %s/%s for restore: %v", pvcSpec.Namespace, pvcSpec.Name, err)
- logrus.Errorf(msg)
- data := updateDataExportDetail{
- status: kdmpapi.DataExportStatusFailed,
- reason: msg,
+ if k8sErrors.IsNotFound(err) {
+ _, err = c.createPVC(dataExport)
+ if err != nil {
+ msg := fmt.Sprintf("Error creating pvc %s/%s for restore: %v", pvcSpec.Namespace, pvcSpec.Name, err)
+ logrus.Errorf(msg)
+ data := updateDataExportDetail{
+ status: kdmpapi.DataExportStatusFailed,
+ reason: msg,
+ }
+ return false, c.updateStatus(dataExport, data)
+ }
 }
- return false, c.updateStatus(dataExport, data)
 }
 
 _, err = checkPVCIgnoringJobMounts(dataExport.Spec.Destination, dataExport.Name)
@@ -273,6 +274,24 @@ func (c *Controller) sync(ctx context.Context, in *kdmpapi.DataExport) (bool, er
 compressionType = kdmpData.Data[compressionKey]
 podDataPath = kdmpData.Data[backupPath]
 }
+ blName := dataExport.Spec.Destination.Name
+ blNamespace := dataExport.Spec.Destination.Namespace
+
+ if driverName == drivers.KopiaRestore {
+ blName = vb.Spec.BackupLocation.Name
+ blNamespace = vb.Spec.BackupLocation.Namespace
+ }
+
+ backupLocation, err := readBackupLocation(blName, blNamespace, "")
+ if err != nil {
+ msg := fmt.Sprintf("reading of backuplocation [%v/%v] failed: %v", blNamespace, blName, err)
+ logrus.Errorf(msg)
+ data := updateDataExportDetail{
+ status: kdmpapi.DataExportStatusFailed,
+ reason: msg,
+ }
+ return false, c.updateStatus(dataExport, data)
+ }
 
 // start data transfer
 id, err := startTransferJob(
@@ -283,6 +302,9 @@ func (c *Controller) sync(ctx context.Context, in *kdmpapi.DataExport) (bool, er
 podDataPath,
 utils.KdmpConfigmapName,
 utils.KdmpConfigmapNamespace,
+ backupLocation.Location.NfsConfig.ServerAddr,
+ backupLocation.Location.Path,
+ backupLocation.Location.NfsConfig.MountOption,
 )
 if err != nil && err != utils.ErrJobAlreadyRunning && err != utils.ErrOutOfJobResources {
 msg := fmt.Sprintf("failed to start a data transfer job, dataexport [%v]: %v", dataExport.Name, err)
@@ -429,6 +451,17 @@ func (c *Controller) sync(ctx context.Context, in *kdmpapi.DataExport) (bool, er
 data := updateDataExportDetail{
 stage: kdmpapi.DataExportStageFinal,
 }
+ // Append the job-pod log to stork's pod log in case of failure;
+ // this is a best-effort approach, hence errors are ignored.
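The failure branch that follows first recovers the executor job's namespace and name from Status.TransferID via utils.ParseJobID before fetching the pod log. The transfer ID evidently encodes both parts in one string; assuming the conventional "<namespace>/<name>" layout (an assumption here, utils.ParseJobID in kdmp is authoritative), the parsing amounts to:

package main

import (
	"fmt"
	"strings"
)

// parseJobID splits an assumed "<namespace>/<name>" transfer ID into its parts.
// The format is an assumption for illustration only.
func parseJobID(id string) (namespace, name string, err error) {
	parts := strings.SplitN(id, "/", 2)
	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
		return "", "", fmt.Errorf("invalid job ID %q, expected <namespace>/<name>", id)
	}
	return parts[0], parts[1], nil
}

func main() {
	ns, name, err := parseJobID("kube-system/import-abc123")
	fmt.Println(ns, name, err) // kube-system import-abc123 <nil>
}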
+ if dataExport.Status.Status == kdmpapi.DataExportStatusFailed {
+ if dataExport.Status.TransferID != "" {
+ namespace, name, err := utils.ParseJobID(dataExport.Status.TransferID)
+ if err != nil {
+ logrus.Infof("job-pod name and namespace extraction failed: %v", err)
+ }
+ appendPodLogToStork(name, namespace)
+ }
+ }
 cleanupTask := func() (interface{}, bool, error) {
 cleanupErr := c.cleanUp(driver, dataExport)
 if cleanupErr != nil {
@@ -452,6 +485,39 @@ func (c *Controller) sync(ctx context.Context, in *kdmpapi.DataExport) (bool, er
 return false, nil
 }
 
+func appendPodLogToStork(jobName string, namespace string) {
+ // Get the job and check whether it has a live pod attached to it
+ job, err := batch.Instance().GetJob(jobName, namespace)
+ if err != nil {
+ if !k8sErrors.IsNotFound(err) {
+ logrus.Infof("failed in getting job %v/%v with err: %v", namespace, jobName, err)
+ }
+ // Without the job there are no pods to inspect; return rather than dereference a nil job below.
+ return
+ }
+ pods, err := core.Instance().GetPods(
+ job.Namespace,
+ map[string]string{
+ "job-name": job.Name,
+ },
+ )
+ if err != nil {
+ logrus.Infof("failed in fetching job pods %s/%s: %v", namespace, jobName, err)
+ return
+ }
+ for _, pod := range pods.Items {
+ numLogLines := int64(50)
+ podLog, err := core.Instance().GetPodLog(pod.Name, pod.Namespace, &corev1.PodLogOptions{TailLines: &numLogLines})
+ if err != nil {
+ logrus.Infof("error fetching log of job-pod %s: %v", pod.Name, err)
+ } else {
+ logrus.Infof("start of job-pod [%s]'s log...", pod.Name)
+ logrus.Infof("%s", podLog)
+ logrus.Infof("end of job-pod [%s]'s log...", pod.Name)
+ }
+ }
+}
+
 func (c *Controller) createJobCredCertSecrets(
 dataExport *kdmpapi.DataExport,
 vb *kdmpapi.VolumeBackup,
@@ -1190,6 +1251,13 @@ func (c *Controller) stageLocalSnapshotRestoreInProgress(ctx context.Context, da
 func (c *Controller) cleanUp(driver drivers.Interface, de *kdmpapi.DataExport) error {
 var bl *storkapi.BackupLocation
 
+ doCleanup, err := utils.DoCleanupResource()
+ if err != nil {
+ return err
+ }
+ if (de.Status.Status == kdmpapi.DataExportStatusFailed) && !doCleanup {
+ return nil
+ }
 if driver == nil {
 return fmt.Errorf("driver is nil")
 }
@@ -1265,6 +1333,21 @@ func (c *Controller) cleanUp(driver drivers.Interface, de *kdmpapi.DataExport) e
 if err != nil && !k8sErrors.IsNotFound(err) {
 return fmt.Errorf("delete %s job: %s", de.Status.TransferID, err)
 }
+ // TODO: Need a better way to find the BL type from the DataExport CR.
+ // For now, delete unconditionally for all BL types.
+ namespace, jobName, err := utils.ParseJobID(de.Status.TransferID) + if err != nil { + return err + } + pvcName := utils.GetPvcNameForJob(jobName) + if err := core.Instance().DeletePersistentVolumeClaim(pvcName, namespace); err != nil && !k8sErrors.IsNotFound(err) { + return fmt.Errorf("delete %s/%s pvc: %s", namespace, pvcName, err) + } + + pvName := utils.GetPvNameForJob(jobName) + if err := core.Instance().DeletePersistentVolume(pvName); err != nil && !k8sErrors.IsNotFound(err) { + return fmt.Errorf("delete %s pv: %s", pvName, err) + } } if err := core.Instance().DeleteSecret(utils.GetCredSecretName(de.Name), namespace); err != nil && !k8sErrors.IsNotFound(err) { @@ -1389,7 +1472,7 @@ func (c *Controller) updateStatus(de *kdmpapi.DataExport, data updateDataExportD de.Status.SnapshotNamespace = data.snapshotNamespace } if data.removeFinalizer { - controllers.RemoveFinalizer(de, cleanupFinalizer) + controllers.RemoveFinalizer(de, kdmpcontroller.CleanupFinalizer) } if data.volumeSnapshot != "" { de.Status.VolumeSnapshot = data.volumeSnapshot @@ -1557,7 +1640,11 @@ func startTransferJob( dataExport *kdmpapi.DataExport, podDataPath string, jobConfigMap string, - jobConfigMapNs string) (string, error) { + jobConfigMapNs string, + nfsServerAddr string, + nfsExportPath string, + nfsMountOption string, +) (string, error) { if drv == nil { return "", fmt.Errorf("data transfer driver is not set") } @@ -1605,6 +1692,9 @@ func startTransferJob( drivers.WithPodDatapathType(podDataPath), drivers.WithJobConfigMap(jobConfigMap), drivers.WithJobConfigMapNs(jobConfigMapNs), + drivers.WithNfsServer(nfsServerAddr), + drivers.WithNfsExportDir(nfsExportPath), + drivers.WithNfsMountOption(nfsMountOption), ) case drivers.KopiaRestore: return drv.StartJob( @@ -1621,6 +1711,8 @@ func startTransferJob( drivers.WithCertSecretNamespace(dataExport.Spec.Destination.Namespace), drivers.WithJobConfigMap(jobConfigMap), drivers.WithJobConfigMapNs(jobConfigMapNs), + drivers.WithNfsServer(nfsServerAddr), + drivers.WithNfsExportDir(nfsExportPath), ) } @@ -1632,7 +1724,7 @@ func checkPVC(in kdmpapi.DataExportObjectReference, checkMounts bool) (*corev1.P return nil, err } // wait for pvc to get bound - pvc, err := waitForPVCBound(in, checkMounts) + pvc, err := utils.WaitForPVCBound(in.Name, in.Namespace) if err != nil { return nil, err } @@ -1650,36 +1742,6 @@ func checkPVC(in kdmpapi.DataExportObjectReference, checkMounts bool) (*corev1.P return pvc, nil } -func waitForPVCBound(in kdmpapi.DataExportObjectReference, checkMounts bool) (*corev1.PersistentVolumeClaim, error) { - if err := checkNameNamespace(in); err != nil { - return nil, err - } - // wait for pvc to get bound - var pvc *corev1.PersistentVolumeClaim - var err error - var errMsg string - wErr := wait.ExponentialBackoff(volumeAPICallBackoff, func() (bool, error) { - pvc, err = core.Instance().GetPersistentVolumeClaim(in.Name, in.Namespace) - if err != nil { - return false, err - } - - if pvc.Status.Phase != corev1.ClaimBound { - errMsg = fmt.Sprintf("pvc status: expected %s, got %s", corev1.ClaimBound, pvc.Status.Phase) - logrus.Debugf("%v", errMsg) - return false, nil - } - - return true, nil - }) - - if wErr != nil { - logrus.Errorf("%v", wErr) - return nil, fmt.Errorf("%s:%s", wErr, errMsg) - } - return pvc, nil -} - func checkPVCIgnoringJobMounts(in kdmpapi.DataExportObjectReference, expectedMountJob string) (*corev1.PersistentVolumeClaim, error) { var pvc *corev1.PersistentVolumeClaim var checkErr error @@ -1701,7 +1763,7 @@ func checkPVCIgnoringJobMounts(in 
kdmpapi.DataExportObjectReference, expectedMou logrus.Debugf("checkPVCIgnoringJobMounts: pvc name %v - storage class VolumeBindingMode %v", pvc.Name, *sc.VolumeBindingMode) if *sc.VolumeBindingMode != storagev1.VolumeBindingWaitForFirstConsumer { // wait for pvc to get bound - pvc, checkErr = waitForPVCBound(in, true) + pvc, checkErr = utils.WaitForPVCBound(in.Name, in.Namespace) if checkErr != nil { return "", false, checkErr } @@ -1709,7 +1771,7 @@ func checkPVCIgnoringJobMounts(in kdmpapi.DataExportObjectReference, expectedMou } else { // If sc is not set, we will direct check the pvc status // wait for pvc to get bound - pvc, checkErr = waitForPVCBound(in, true) + pvc, checkErr = utils.WaitForPVCBound(in.Name, in.Namespace) if checkErr != nil { return "", false, checkErr } @@ -1899,6 +1961,8 @@ func CreateCredentialsSecret(secretName, blName, blNamespace, namespace string, return createGoogleSecret(secretName, backupLocation, namespace, labels) case storkapi.BackupLocationAzure: return createAzureSecret(secretName, backupLocation, namespace, labels) + case storkapi.BackupLocationNFS: + return utils.CreateNfsSecret(secretName, backupLocation, namespace, labels) } return fmt.Errorf("unsupported backup location: %v", backupLocation.Location.Type) @@ -1936,7 +2000,7 @@ func createS3Secret(secretName string, backupLocation *storkapi.BackupLocation, credentialData["type"] = []byte(backupLocation.Location.Type) credentialData["password"] = []byte(backupLocation.Location.RepositoryPassword) credentialData["disablessl"] = []byte(strconv.FormatBool(backupLocation.Location.S3Config.DisableSSL)) - err := createJobSecret(secretName, namespace, credentialData, labels) + err := utils.CreateJobSecret(secretName, namespace, credentialData, labels) return err } @@ -1948,7 +2012,7 @@ func createGoogleSecret(secretName string, backupLocation *storkapi.BackupLocati credentialData["accountkey"] = []byte(backupLocation.Location.GoogleConfig.AccountKey) credentialData["projectid"] = []byte(backupLocation.Location.GoogleConfig.ProjectID) credentialData["path"] = []byte(backupLocation.Location.Path) - err := createJobSecret(secretName, namespace, credentialData, labels) + err := utils.CreateJobSecret(secretName, namespace, credentialData, labels) return err } @@ -1960,7 +2024,7 @@ func createAzureSecret(secretName string, backupLocation *storkapi.BackupLocatio credentialData["path"] = []byte(backupLocation.Location.Path) credentialData["storageaccountname"] = []byte(backupLocation.Location.AzureConfig.StorageAccountName) credentialData["storageaccountkey"] = []byte(backupLocation.Location.AzureConfig.StorageAccountKey) - err := createJobSecret(secretName, namespace, credentialData, labels) + err := utils.CreateJobSecret(secretName, namespace, credentialData, labels) return err } @@ -1977,7 +2041,7 @@ func createCertificateSecret(secretName, namespace string, labels map[string]str certData := make(map[string][]byte) certData[drivers.CertFileName] = certificateData - err = createJobSecret(secretName, namespace, certData, labels) + err = utils.CreateJobSecret(secretName, namespace, certData, labels) return err } @@ -1985,32 +2049,6 @@ func createCertificateSecret(secretName, namespace string, labels map[string]str return nil } -func createJobSecret( - secretName string, - namespace string, - credentialData map[string][]byte, - labels map[string]string, -) error { - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: namespace, - Labels: labels, - Annotations: 
map[string]string{
- utils.SkipResourceAnnotation: "true",
- },
- },
- Data: credentialData,
- Type: corev1.SecretTypeOpaque,
- }
- _, err := core.Instance().CreateSecret(secret)
- if err != nil && k8sErrors.IsAlreadyExists(err) {
- return nil
- }
-
- return err
-}
-
 func toSnapName(pvcName, dataExportUID string) string {
 truncatedPVCName := pvcName
 if len(pvcName) > pvcNameLenLimit {
diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/drivers.go b/vendor/github.com/portworx/kdmp/pkg/drivers/drivers.go
index 75849ee94d..e4f4a25271 100644
--- a/vendor/github.com/portworx/kdmp/pkg/drivers/drivers.go
+++ b/vendor/github.com/portworx/kdmp/pkg/drivers/drivers.go
@@ -2,6 +2,7 @@ package drivers
 
 import (
 "fmt"
+
 batchv1 "k8s.io/api/batch/v1"
 )
 
@@ -14,6 +15,9 @@ const (
 KopiaRestore = "kopiarestore"
 KopiaDelete = "kopiadelete"
 KopiaMaintenance = "kopiamaintenance"
+ NFSBackup = "nfsbackup"
+ NFSRestore = "nfsrestore"
+ NFSDelete = "nfsdelete"
 )
 
 // Docker images.
@@ -21,6 +25,7 @@ const (
 ResticExecutorImage = "portworx/resticexecutor"
 KopiaExecutorImage = "kopiaexecutor"
 RsyncImage = "eeacms/rsync"
+ NfsExecutorImage = "nfsexecutor"
 )
 
 // Driver labels.
@@ -46,6 +51,7 @@ const (
 CertFileName = "public.crt"
 CertSecretName = "tls-s3-cert"
 CertMount = "/etc/tls-s3-cert"
+ NfsMount = "/tmp/nfs-target/"
 )
 
 // Driver job options.
@@ -70,6 +76,10 @@ const (
 KopiaExecutorRequestCPU = "KDMP_KOPIAEXECUTOR_REQUEST_CPU"
 KopiaExecutorRequestMemory = "KDMP_KOPIAEXECUTOR_REQUEST_MEMORY"
 KopiaExecutorLimitCPU = "KDMP_KOPIAEXECUTOR_LIMIT_CPU"
 KopiaExecutorLimitMemory = "KDMP_KOPIAEXECUTOR_LIMIT_MEMORY"
+ NFSExecutorRequestCPU = "KDMP_NFSEXECUTOR_REQUEST_CPU"
+ NFSExecutorRequestMemory = "KDMP_NFSEXECUTOR_REQUEST_MEMORY"
+ NFSExecutorLimitCPU = "KDMP_NFSEXECUTOR_LIMIT_CPU"
+ NFSExecutorLimitMemory = "KDMP_NFSEXECUTOR_LIMIT_MEMORY"
 )
 
 // Default parameters for job options.
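Each of the new KDMP_NFSEXECUTOR_* option keys pairs with one of the defaults declared in the next hunk. A hypothetical resolver makes the pairing concrete; note that kdmp plumbs these through its job config map options rather than necessarily the process environment, so this sketch illustrates only the key/default relationship:

package main

import (
	"fmt"
	"os"
)

// getWithDefault returns the configured value for key, else the fallback.
func getWithDefault(key, def string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return def
}

func main() {
	resources := map[string]string{
		"request-cpu":    getWithDefault("KDMP_NFSEXECUTOR_REQUEST_CPU", "0.1"),
		"request-memory": getWithDefault("KDMP_NFSEXECUTOR_REQUEST_MEMORY", "700Mi"),
		"limit-cpu":      getWithDefault("KDMP_NFSEXECUTOR_LIMIT_CPU", "0.2"),
		"limit-memory":   getWithDefault("KDMP_NFSEXECUTOR_LIMIT_MEMORY", "1Gi"),
	}
	fmt.Println(resources)
}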
@@ -86,6 +96,10 @@ const ( DefaultKopiaExecutorRequestMemory = "700Mi" DefaultKopiaExecutorLimitCPU = "0.2" DefaultKopiaExecutorLimitMemory = "1Gi" + DefaultNFSExecutorRequestCPU = "0.1" + DefaultNFSExecutorRequestMemory = "700Mi" + DefaultNFSExecutorLimitCPU = "0.2" + DefaultNFSExecutorLimitMemory = "1Gi" ) var ( diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/driversinstance/driversinstance.go b/vendor/github.com/portworx/kdmp/pkg/drivers/driversinstance/driversinstance.go index 44998fb49b..886d422f58 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/driversinstance/driversinstance.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/driversinstance/driversinstance.go @@ -9,6 +9,9 @@ import ( "github.com/portworx/kdmp/pkg/drivers/kopiadelete" "github.com/portworx/kdmp/pkg/drivers/kopiamaintenance" "github.com/portworx/kdmp/pkg/drivers/kopiarestore" + "github.com/portworx/kdmp/pkg/drivers/nfsbackup" + "github.com/portworx/kdmp/pkg/drivers/nfsdelete" + "github.com/portworx/kdmp/pkg/drivers/nfsrestore" "github.com/portworx/kdmp/pkg/drivers/resticbackup" "github.com/portworx/kdmp/pkg/drivers/resticrestore" "github.com/portworx/kdmp/pkg/drivers/rsync" @@ -24,6 +27,9 @@ var ( drivers.KopiaRestore: kopiarestore.Driver{}, drivers.KopiaDelete: kopiadelete.Driver{}, drivers.KopiaMaintenance: kopiamaintenance.Driver{}, + drivers.NFSBackup: nfsbackup.Driver{}, + drivers.NFSRestore: nfsrestore.Driver{}, + drivers.NFSDelete: nfsdelete.Driver{}, } ) diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackup.go b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackup.go index 5884bfb830..a89597e40f 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackup.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackup.go @@ -93,6 +93,15 @@ func (d Driver) StartJob(opts ...drivers.JobOption) (id string, err error) { logrus.Errorf("%s: %v", fn, errMsg) return "", fmt.Errorf(errMsg) } + + // Create PV & PVC only in case of NFS. 
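The guarded block that follows creates a PV/PVC pair pointing at the NFS target before the job is submitted, and the jobFor/jobForLiveBackup hunks further down then mount that claim into the executor container. Since the same volume-plus-mount snippet recurs in each driver, here is what it consolidates to as a standalone helper (hypothetical refactoring; "nfs-target" and the pvcName parameter stand in for utils.NfsVolumeName and utils.GetPvcNameForJob):

package nfsjob

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

// addNfsVolume mounts the per-job NFS-backed PVC into the job's first
// container at mountPath, mirroring the snippet repeated in the drivers.
func addNfsVolume(job *batchv1.Job, pvcName, mountPath string) {
	const volName = "nfs-target" // stand-in for utils.NfsVolumeName

	spec := &job.Spec.Template.Spec
	spec.Containers[0].VolumeMounts = append(spec.Containers[0].VolumeMounts,
		corev1.VolumeMount{Name: volName, MountPath: mountPath})
	spec.Volumes = append(spec.Volumes, corev1.Volume{
		Name: volName,
		VolumeSource: corev1.VolumeSource{
			PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
				ClaimName: pvcName,
			},
		},
	})
}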
+ if o.NfsServer != "" { + err := utils.CreateNFSPvPvcForJob(jobName, job.ObjectMeta.Namespace, o) + if err != nil { + return "", err + } + } + if _, err = batch.Instance().CreateJob(job); err != nil && !apierrors.IsAlreadyExists(err) { errMsg := fmt.Sprintf("creation of backup job %s failed: %v", jobName, err) logrus.Errorf("%s: %v", fn, errMsg) @@ -274,28 +283,16 @@ func jobFor( splitCmd = append(splitCmd, "--compression", jobOption.Compression) cmd = strings.Join(splitCmd, " ") } - imageRegistry, imageRegistrySecret, err := utils.GetKopiaExecutorImageRegistryAndSecret( + kopiaExecutorImage, _, err := utils.GetExecutorImageAndSecret(drivers.KopiaExecutorImage, jobOption.KopiaImageExecutorSource, jobOption.KopiaImageExecutorSourceNs, - ) + jobName, + jobOption) if err != nil { - logrus.Errorf("jobFor: getting kopia image registry and image secret failed during backup: %v", err) - return nil, err + errMsg := fmt.Errorf("failed to get the executor image details for job %s", jobName) + logrus.Errorf("%v", errMsg) + return nil, errMsg } - if len(imageRegistrySecret) != 0 { - err = utils.CreateImageRegistrySecret(imageRegistrySecret, jobName, jobOption.KopiaImageExecutorSourceNs, jobOption.Namespace) - if err != nil { - return nil, err - } - - } - var kopiaExecutorImage string - if len(imageRegistry) != 0 { - kopiaExecutorImage = fmt.Sprintf("%s/%s", imageRegistry, utils.GetKopiaExecutorImageName()) - } else { - kopiaExecutorImage = utils.GetKopiaExecutorImageName() - } - job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: jobName, @@ -362,7 +359,25 @@ func jobFor( }, }, } - + if len(jobOption.NfsServer) != 0 { + volumeMount := corev1.VolumeMount{ + Name: utils.NfsVolumeName, + MountPath: drivers.NfsMount, + } + job.Spec.Template.Spec.Containers[0].VolumeMounts = append( + job.Spec.Template.Spec.Containers[0].VolumeMounts, + volumeMount, + ) + volume := corev1.Volume{ + Name: utils.NfsVolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: utils.GetPvcNameForJob(jobName), + }, + }, + } + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, volume) + } if drivers.CertFilePath != "" { volumeMount := corev1.VolumeMount{ Name: utils.TLSCertMountVol, diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackuplive.go b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackuplive.go index 48f40d57de..119699e0a2 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackuplive.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackuplive.go @@ -69,29 +69,16 @@ func jobForLiveBackup( } privileged := true - - imageRegistry, imageRegistrySecret, err := utils.GetKopiaExecutorImageRegistryAndSecret( + kopiaExecutorImage, _, err := utils.GetExecutorImageAndSecret(drivers.KopiaExecutorImage, jobOption.KopiaImageExecutorSource, jobOption.KopiaImageExecutorSourceNs, - ) + jobName, + jobOption) if err != nil { - logrus.Errorf("jobFor: getting kopia image registry and image secret failed during live backup: %v", err) - return nil, err - } - if len(imageRegistrySecret) != 0 { - err = utils.CreateImageRegistrySecret(imageRegistrySecret, jobName, jobOption.KopiaImageExecutorSourceNs, jobOption.Namespace) - if err != nil { - return nil, err - } + errMsg := fmt.Errorf("failed to get the executor image details for job %s", jobName) + logrus.Errorf("%v", errMsg) + return nil, errMsg } - - var kopiaExecutorImage string - if len(imageRegistry) != 0 { - 
kopiaExecutorImage = fmt.Sprintf("%s/%s", imageRegistry, utils.GetKopiaExecutorImageName()) - } else { - kopiaExecutorImage = utils.GetKopiaExecutorImageName() - } - job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: jobName, @@ -163,6 +150,26 @@ func jobForLiveBackup( }, } + if len(jobOption.NfsServer) != 0 { + volumeMount := corev1.VolumeMount{ + Name: utils.NfsVolumeName, + MountPath: drivers.NfsMount, + } + job.Spec.Template.Spec.Containers[0].VolumeMounts = append( + job.Spec.Template.Spec.Containers[0].VolumeMounts, + volumeMount, + ) + volume := corev1.Volume{ + Name: utils.NfsVolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: utils.GetPvcNameForJob(jobName), + }, + }, + } + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, volume) + } + if drivers.CertFilePath != "" { volumeMount := corev1.VolumeMount{ Name: utils.TLSCertMountVol, diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiadelete/kopiadelete.go b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiadelete/kopiadelete.go index ab6dcfd574..6908bacac7 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiadelete/kopiadelete.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiadelete/kopiadelete.go @@ -84,6 +84,15 @@ func (d Driver) StartJob(opts ...drivers.JobOption) (id string, err error) { logrus.Errorf("%s %v", fn, errMsg) return "", fmt.Errorf(errMsg) } + + // Create PV & PVC only in case of NFS. + if o.NfsServer != "" { + err := utils.CreateNFSPvPvcForJob(jobName, job.ObjectMeta.Namespace, o) + if err != nil { + return "", err + } + } + if _, err = batch.Instance().CreateJob(job); err != nil && !apierrors.IsAlreadyExists(err) { errMsg := fmt.Sprintf("creation of backup snapshot delete job [%s] failed: %v", jobName, err) logrus.Errorf("%s %v", fn, errMsg) @@ -184,19 +193,15 @@ func jobFor( jobOption.VolumeBackupDeleteNamespace, }, " ") - imageRegistry, imageRegistrySecret, err := utils.GetKopiaExecutorImageRegistryAndSecret( + kopiaExecutorImage, imageRegistrySecret, err := utils.GetExecutorImageAndSecret(drivers.KopiaExecutorImage, jobOption.KopiaImageExecutorSource, jobOption.KopiaImageExecutorSourceNs, - ) + jobName, + jobOption) if err != nil { - logrus.Errorf("jobFor: getting kopia image registry and image secret failed during delete: %v", err) - return nil, err - } - var kopiaExecutorImage string - if len(imageRegistry) != 0 { - kopiaExecutorImage = fmt.Sprintf("%s/%s", imageRegistry, utils.GetKopiaExecutorImageName()) - } else { - kopiaExecutorImage = utils.GetKopiaExecutorImageName() + errMsg := fmt.Errorf("failed to get the executor image details for job %s", jobName) + logrus.Errorf("%v", errMsg) + return nil, errMsg } job := &batchv1.Job{ @@ -255,6 +260,27 @@ func jobFor( }, } + if len(jobOption.NfsServer) != 0 { + volumeMount := corev1.VolumeMount{ + Name: utils.NfsVolumeName, + MountPath: drivers.NfsMount, + } + job.Spec.Template.Spec.Containers[0].VolumeMounts = append( + job.Spec.Template.Spec.Containers[0].VolumeMounts, + volumeMount, + ) + volume := corev1.Volume{ + Name: utils.NfsVolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: utils.GetPvcNameForJob(jobName), + }, + }, + } + + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, volume) + } + if drivers.CertFilePath != "" { volumeMount := corev1.VolumeMount{ Name: utils.TLSCertMountVol, diff --git 
a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiamaintenance/kopiamaintenance.go b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiamaintenance/kopiamaintenance.go index 0457394876..642898867d 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiamaintenance/kopiamaintenance.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiamaintenance/kopiamaintenance.go @@ -70,6 +70,14 @@ func (d Driver) StartJob(opts ...drivers.JobOption) (id string, err error) { return "", fmt.Errorf(errMsg) } + // Create PV & PVC only in case of NFS. + if o.NfsServer != "" { + err := utils.CreateNFSPvPvcForJob(jobName, o.JobNamespace, o) + if err != nil { + return "", err + } + } + if requiresV1 { jobV1 := job.(*batchv1.CronJob) _, err = batch.Instance().CreateCronJob(jobV1) @@ -196,19 +204,15 @@ func jobFor( jobOption.MaintenanceType, }, " ") - imageRegistry, imageRegistrySecret, err := utils.GetKopiaExecutorImageRegistryAndSecret( + kopiaExecutorImage, imageRegistrySecret, err := utils.GetExecutorImageAndSecret(drivers.KopiaExecutorImage, jobOption.KopiaImageExecutorSource, jobOption.KopiaImageExecutorSourceNs, - ) + jobName, + jobOption) if err != nil { - logrus.Errorf("jobFor: getting kopia image registry and image secret failed during maintenance: %v", err) - return nil, err - } - var kopiaExecutorImage string - if len(imageRegistry) != 0 { - kopiaExecutorImage = fmt.Sprintf("%s/%s", imageRegistry, utils.GetKopiaExecutorImageName()) - } else { - kopiaExecutorImage = utils.GetKopiaExecutorImageName() + errMsg := fmt.Errorf("failed to get the executor image details for job %s", jobName) + logrus.Errorf("%v", errMsg) + return nil, errMsg } jobObjectMeta := metav1.ObjectMeta{ @@ -257,10 +261,30 @@ func jobFor( }, }, } - var volumeMount corev1.VolumeMount var volume corev1.Volume var env []corev1.EnvVar + + if len(jobOption.NfsServer) != 0 { + volumeMount = corev1.VolumeMount{ + Name: utils.NfsVolumeName, + MountPath: drivers.NfsMount, + } + jobSpec.Containers[0].VolumeMounts = append( + jobSpec.Containers[0].VolumeMounts, + volumeMount, + ) + volume = corev1.Volume{ + Name: utils.NfsVolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: utils.GetPvcNameForJob(jobName), + }, + }, + } + jobSpec.Volumes = append(jobSpec.Volumes, volume) + } + if drivers.CertFilePath != "" { volumeMount = corev1.VolumeMount{ Name: utils.TLSCertMountVol, diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiarestore/kopiarestore.go b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiarestore/kopiarestore.go index ed5beddff1..0864e89035 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiarestore/kopiarestore.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiarestore/kopiarestore.go @@ -70,6 +70,15 @@ func (d Driver) StartJob(opts ...drivers.JobOption) (id string, err error) { if err != nil { return "", err } + + // Create PV & PVC only in case of NFS. 
+ if o.NfsServer != "" { + err := utils.CreateNFSPvPvcForJob(jobName, job.ObjectMeta.Namespace, o) + if err != nil { + return "", err + } + } + if _, err = batch.Instance().CreateJob(job); err != nil && !apierrors.IsAlreadyExists(err) { return "", err } @@ -191,28 +200,16 @@ func jobFor( vb.Status.SnapshotID, }, " ") - imageRegistry, imageRegistrySecret, err := utils.GetKopiaExecutorImageRegistryAndSecret( + kopiaExecutorImage, _, err := utils.GetExecutorImageAndSecret(drivers.KopiaExecutorImage, jobOption.KopiaImageExecutorSource, jobOption.KopiaImageExecutorSourceNs, - ) + jobName, + jobOption) if err != nil { - logrus.Errorf("jobFor: getting kopia image registry and image secret failed during restore: %v", err) - return nil, err - } - if len(imageRegistrySecret) != 0 { - err = utils.CreateImageRegistrySecret(imageRegistrySecret, jobName, jobOption.KopiaImageExecutorSourceNs, jobOption.Namespace) - if err != nil { - return nil, err - } + errMsg := fmt.Errorf("failed to get the executor image details for job %s", jobName) + logrus.Errorf("%v", errMsg) + return nil, errMsg } - - var kopiaExecutorImage string - if len(imageRegistry) != 0 { - kopiaExecutorImage = fmt.Sprintf("%s/%s", imageRegistry, utils.GetKopiaExecutorImageName()) - } else { - kopiaExecutorImage = utils.GetKopiaExecutorImageName() - } - job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: jobName, @@ -313,6 +310,26 @@ func jobFor( job.Spec.Template.Spec.Containers[0].Env = env } + if len(jobOption.NfsServer) != 0 { + volumeMount := corev1.VolumeMount{ + Name: utils.NfsVolumeName, + MountPath: drivers.NfsMount, + } + job.Spec.Template.Spec.Containers[0].VolumeMounts = append( + job.Spec.Template.Spec.Containers[0].VolumeMounts, + volumeMount, + ) + volume := corev1.Volume{ + Name: utils.NfsVolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: utils.GetPvcNameForJob(jobName), + }, + }, + } + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, volume) + } + return job, nil } diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/nfsbackup/nfsbackup.go b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsbackup/nfsbackup.go new file mode 100644 index 0000000000..6322a24348 --- /dev/null +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsbackup/nfsbackup.go @@ -0,0 +1,277 @@ +package nfsbackup + +import ( + "fmt" + "strings" + + "github.com/portworx/kdmp/pkg/drivers" + "github.com/portworx/kdmp/pkg/drivers/utils" + "github.com/portworx/sched-ops/k8s/batch" + "github.com/portworx/sched-ops/k8s/kdmp" + + "github.com/sirupsen/logrus" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Driver is a nfsbackup implementation of the data export interface. +type Driver struct{} + +// Name returns a name of the driver. +func (d Driver) Name() string { + return drivers.NFSBackup +} + +// StartJob creates a job for data transfer between volumes. +func (d Driver) StartJob(opts ...drivers.JobOption) (id string, err error) { + // FOr every ns to be backed up a new job should be created + funct := "NfsStartJob" + logrus.Infof("Inside function %s", funct) + o := drivers.JobOpts{} + for _, opt := range opts { + if opt != nil { + if err := opt(&o); err != nil { + return "", err + } + } + } + + job, err := buildJob(o) + if err != nil { + return "", err + } + + // Create PV & PVC only in case of NFS. 
+ jobName := o.RestoreExportName + if o.NfsServer != "" { + err := utils.CreateNFSPvPvcForJob(jobName, job.ObjectMeta.Namespace, o) + if err != nil { + return "", err + } + } + + if _, err = batch.Instance().CreateJob(job); err != nil && !apierrors.IsAlreadyExists(err) { + errMsg := fmt.Sprintf("creation of nfs backup job %s failed: %v", o.RestoreExportName, err) + logrus.Errorf("%s: %v", funct, errMsg) + return "", fmt.Errorf(errMsg) + } + + return utils.NamespacedName(job.Namespace, job.Name), nil +} + +// DeleteJob stops data transfer between volumes. +func (d Driver) DeleteJob(id string) error { + + return nil +} + +// JobStatus returns a progress status for a data transfer. +func (d Driver) JobStatus(id string) (*drivers.JobStatus, error) { + fn := "JobStatus" + namespace, name, err := utils.ParseJobID(id) + if err != nil { + return utils.ToNFSJobStatus(err.Error(), batchv1.JobConditionType("")), nil + } + + job, err := batch.Instance().GetJob(name, namespace) + if err != nil { + errMsg := fmt.Sprintf("failed to fetch backup %s/%s job: %v", namespace, name, err) + logrus.Errorf("%s: %v", fn, errMsg) + return nil, fmt.Errorf(errMsg) + } + var jobStatus batchv1.JobConditionType + if len(job.Status.Conditions) != 0 { + jobStatus = job.Status.Conditions[0].Type + + } + err = utils.JobNodeExists(job) + if err != nil { + errMsg := fmt.Sprintf("failed to fetch the node info tied to the job %s/%s: %v", namespace, name, err) + logrus.Errorf("%s: %v", fn, errMsg) + return nil, fmt.Errorf(errMsg) + } + jobErr, nodeErr := utils.IsJobOrNodeFailed(job) + var errMsg string + if jobErr { + errMsg = fmt.Sprintf("check %s/%s job for details: %s", namespace, name, drivers.ErrJobFailed) + return utils.ToNFSJobStatus(errMsg, jobStatus), nil + } + if nodeErr { + errMsg = fmt.Sprintf("Node [%v] on which job [%v/%v] schedules is NotReady", job.Spec.Template.Spec.NodeName, namespace, name) + return utils.ToNFSJobStatus(errMsg, jobStatus), nil + } + + res, err := kdmp.Instance().GetResourceBackup(name, namespace) + if err != nil { + if apierrors.IsNotFound(err) && utils.IsJobPending(job) { + logrus.Warnf("backup job %s is in pending state", job.Name) + return utils.ToNFSJobStatus(err.Error(), jobStatus), nil + } + // res is nil on any other error; surface it instead of dereferencing res below. + return nil, err + } + + return utils.ToNFSJobStatus(res.Status.Reason, jobStatus), nil +} + +func buildJob( + jobOptions drivers.JobOpts, +) (*batchv1.Job, error) { + funct := "NfsbuildJob" + // Setup service account using same role permission as stork role + logrus.Infof("Inside %s function", funct) + if err := utils.SetupNFSServiceAccount(jobOptions.RestoreExportName, jobOptions.Namespace, roleFor()); err != nil { + errMsg := fmt.Sprintf("error creating service account %s/%s: %v", jobOptions.Namespace, jobOptions.RestoreExportName, err) + logrus.Errorf("%s: %v", funct, errMsg) + return nil, fmt.Errorf(errMsg) + } + + resources, err := utils.NFSResourceRequirements(jobOptions.JobConfigMap, jobOptions.JobConfigMapNs) + if err != nil { + return nil, err + } + + job, err := jobForBackupResource(jobOptions, resources) + if err != nil { + errMsg := fmt.Sprintf("building resource backup job %s failed: %v", jobOptions.RestoreExportName, err) + logrus.Errorf("%s: %v", funct, errMsg) + return nil, fmt.Errorf(errMsg) + } + + return job, nil +} + +func roleFor() *rbacv1.ClusterRole { + role := &rbacv1.ClusterRole{ + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"*"}, + Resources: []string{"*"}, + Verbs: []string{rbacv1.VerbAll}, + }, + }, + } + + return role +} + +func addJobLabels(labels map[string]string) 
map[string]string { + if labels == nil { + labels = make(map[string]string) + } + + labels[drivers.DriverNameLabel] = drivers.NFSBackup + return labels +} + +func jobForBackupResource( + jobOption drivers.JobOpts, + resources corev1.ResourceRequirements, +) (*batchv1.Job, error) { + cmd := strings.Join([]string{ + "/nfsexecutor", + "backup", + "--app-cr-name", + jobOption.AppCRName, + "--backup-namespace", + jobOption.AppCRNamespace, + // resourcebackup CR name + "--rb-cr-name", + jobOption.ResoureBackupName, + // resourcebackup CR namespace + "--rb-cr-namespace", + jobOption.ResoureBackupNamespace, + }, " ") + + labels := addJobLabels(jobOption.Labels) + + nfsExecutorImage, _, err := utils.GetExecutorImageAndSecret(drivers.NfsExecutorImage, + jobOption.NfsImageExecutorSource, + jobOption.NfsImageExecutorSourceNs, + jobOption.JobName, + jobOption) + + if err != nil { + logrus.Errorf("failed to get the executor image details") + return nil, fmt.Errorf("failed to get the executor image details for job %s", jobOption.JobName) + } + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: jobOption.RestoreExportName, + Namespace: jobOption.Namespace, + Annotations: map[string]string{ + utils.SkipResourceAnnotation: "true", + }, + Labels: labels, + }, + Spec: batchv1.JobSpec{ + BackoffLimit: &utils.JobPodBackOffLimit, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyOnFailure, + ImagePullSecrets: utils.ToImagePullSecret(utils.GetImageSecretName(jobOption.RestoreExportName)), + ServiceAccountName: jobOption.RestoreExportName, + Containers: []corev1.Container{ + { + Name: drivers.NfsExecutorImage, + Image: nfsExecutorImage, + ImagePullPolicy: corev1.PullAlways, + Command: []string{ + "/bin/sh", + "-x", + "-c", + cmd, + }, + Resources: resources, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "cred-secret", + MountPath: drivers.KopiaCredSecretMount, + ReadOnly: true, + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "cred-secret", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: utils.GetCredSecretName(jobOption.RestoreExportName), + }, + }, + }, + }, + }, + }, + }, + } + if len(jobOption.NfsServer) != 0 { + volumeMount := corev1.VolumeMount{ + Name: utils.NfsVolumeName, + MountPath: drivers.NfsMount, + } + job.Spec.Template.Spec.Containers[0].VolumeMounts = append( + job.Spec.Template.Spec.Containers[0].VolumeMounts, + volumeMount, + ) + volume := corev1.Volume{ + Name: utils.NfsVolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: utils.GetPvcNameForJob(jobOption.RestoreExportName), + }, + }, + } + + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, volume) + } + + return job, nil +} diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/nfsdelete/nfsdelete.go b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsdelete/nfsdelete.go new file mode 100644 index 0000000000..2a5d772e30 --- /dev/null +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsdelete/nfsdelete.go @@ -0,0 +1,253 @@ +package nfsdelete + +import ( + "fmt" + "strings" + "sync" + + "github.com/portworx/kdmp/pkg/drivers" + "github.com/portworx/kdmp/pkg/drivers/utils" + "github.com/portworx/kdmp/pkg/jobratelimit" + "github.com/portworx/sched-ops/k8s/batch" + "github.com/sirupsen/logrus" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + apierrors 
"k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Driver is an implementation of resource delete in NFS. +type Driver struct{} + +// Name returns a name of the driver. +func (d Driver) Name() string { + return drivers.NFSBackup +} + +var deleteJobLock sync.Mutex + +// StartJob creates a job for resource delete. +func (d Driver) StartJob(opts ...drivers.JobOption) (id string, err error) { + fn := "StartJob" + deleteJobLock.Lock() + defer deleteJobLock.Unlock() + o := drivers.JobOpts{} + for _, opt := range opts { + if opt != nil { + if err := opt(&o); err != nil { + return "", err + } + } + } + // Check whether there is slot to schedule delete job. + available, err := jobratelimit.CanJobBeScheduled(d.Name()) + if err != nil { + logrus.Errorf("%v", err) + return "", err + } + if !available { + return "", utils.ErrOutOfJobResources + } + + job, err := buildJob(o) + if err != nil { + errMsg := fmt.Sprintf("building of resource delete job [%s] failed: %v", job.Name, err) + logrus.Errorf("%s %v", fn, errMsg) + return "", fmt.Errorf(errMsg) + } + + // Create PV & PVC only in case of NFS. + if o.NfsServer != "" { + err := utils.CreateNFSPvPvcForJob(o.JobName, job.ObjectMeta.Namespace, o) + if err != nil { + return "", err + } + } + + if _, err = batch.Instance().CreateJob(job); err != nil && !apierrors.IsAlreadyExists(err) { + errMsg := fmt.Sprintf("creation of resource delete job %s failed: %v", job.Name, err) + logrus.Errorf("%s: %v", fn, errMsg) + return "", fmt.Errorf(errMsg) + } + + logrus.Infof("%s created resource delete job [%s] successfully", fn, job.Name) + return utils.NamespacedName(job.Namespace, job.Name), nil + +} + +// DeleteJob deletes the resource delete job. +func (d Driver) DeleteJob(id string) error { + fn := "DeleteJob:" + namespace, name, err := utils.ParseJobID(id) + if err != nil { + logrus.Errorf("%s %v", fn, err) + return err + } + if err = batch.Instance().DeleteJob(name, namespace); err != nil && !apierrors.IsNotFound(err) { + errMsg := fmt.Sprintf("deletion of resource delete job [%s/%s] failed: %v", namespace, name, err) + logrus.Errorf("%s: %v", fn, errMsg) + return fmt.Errorf(errMsg) + } + + return nil +} + +// JobStatus fetches job status +func (d Driver) JobStatus(id string) (*drivers.JobStatus, error) { + fn := "JobStatus" + namespace, name, err := utils.ParseJobID(id) + if err != nil { + return utils.ToJobStatus(0, err.Error(), batchv1.JobConditionType("")), nil + } + + job, err := batch.Instance().GetJob(name, namespace) + if err != nil { + errMsg := fmt.Sprintf("failed to fetch resource delete %s/%s job: %v", namespace, name, err) + logrus.Errorf("%s: %v", fn, errMsg) + return nil, fmt.Errorf(errMsg) + } + err = utils.JobNodeExists(job) + if err != nil { + errMsg := fmt.Sprintf("failed to fetch the node info tied to the job %s/%s: %v", namespace, name, err) + logrus.Errorf("%s: %v", fn, errMsg) + return nil, fmt.Errorf(errMsg) + } + var jobStatus batchv1.JobConditionType + if len(job.Status.Conditions) != 0 { + jobStatus = job.Status.Conditions[0].Type + + } + + if utils.IsJobFailed(job) { + errMsg := fmt.Sprintf("check %s/%s job for details: %s", namespace, name, drivers.ErrJobFailed) + return utils.ToJobStatus(0, errMsg, jobStatus), nil + } + if utils.IsJobCompleted(job) { + return utils.ToJobStatus(drivers.TransferProgressCompleted, "", jobStatus), nil + } + return utils.ToJobStatus(0, "", jobStatus), nil +} + +func buildJob( + jobOptions drivers.JobOpts, +) (*batchv1.Job, error) { + resources, err := 
utils.NFSResourceRequirements(jobOptions.JobConfigMap, jobOptions.JobConfigMapNs) + if err != nil { + return nil, err + } + labels := addJobLabels(jobOptions.Labels, jobOptions) + return jobForDeleteResource(jobOptions, resources, labels) +} + +func addJobLabels(labels map[string]string, jobOpts drivers.JobOpts) map[string]string { + if labels == nil { + labels = make(map[string]string) + } + + labels[drivers.DriverNameLabel] = drivers.NFSDelete + labels[utils.BackupObjectNameKey] = jobOpts.BackupObjectName + labels[utils.BackupObjectUIDKey] = jobOpts.BackupObjectUID + return labels +} + +func jobForDeleteResource( + jobOption drivers.JobOpts, + resources corev1.ResourceRequirements, + labels map[string]string, +) (*batchv1.Job, error) { + cmd := strings.Join([]string{ + "/nfsexecutor", + "delete", + "--app-cr-name", + jobOption.AppCRName, + "--namespace", + jobOption.AppCRNamespace, + }, " ") + + // Use the NFS executor image source, consistent with the nfsbackup and nfsrestore drivers. + nfsExecutorImage, _, err := utils.GetExecutorImageAndSecret(drivers.NfsExecutorImage, + jobOption.NfsImageExecutorSource, + jobOption.NfsImageExecutorSourceNs, + jobOption.JobName, + jobOption) + if err != nil { + logrus.Errorf("failed to get the executor image details") + return nil, fmt.Errorf("failed to get the executor image details for job %s", jobOption.JobName) + } + + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: jobOption.JobName, + Namespace: jobOption.JobNamespace, + Annotations: map[string]string{ + utils.SkipResourceAnnotation: "true", + }, + Labels: labels, + }, + Spec: batchv1.JobSpec{ + BackoffLimit: &utils.JobPodBackOffLimit, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyOnFailure, + ImagePullSecrets: utils.ToImagePullSecret(utils.GetImageSecretName(jobOption.JobName)), + ServiceAccountName: jobOption.ServiceAccountName, + Containers: []corev1.Container{ + { + Name: "nfsexecutor", + Image: nfsExecutorImage, + ImagePullPolicy: corev1.PullAlways, + Command: []string{ + "/bin/sh", + "-x", + "-c", + cmd, + }, + Resources: resources, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "cred-secret", + MountPath: drivers.KopiaCredSecretMount, + ReadOnly: true, + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "cred-secret", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: jobOption.CredSecretName, + }, + }, + }, + }, + }, + }, + }, + } + if len(jobOption.NfsServer) != 0 { + volumeMount := corev1.VolumeMount{ + Name: utils.NfsVolumeName, + MountPath: drivers.NfsMount, + } + job.Spec.Template.Spec.Containers[0].VolumeMounts = append( + job.Spec.Template.Spec.Containers[0].VolumeMounts, + volumeMount, + ) + volume := corev1.Volume{ + Name: utils.NfsVolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: utils.GetPvcNameForJob(jobOption.JobName), + }, + }, + } + + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, volume) + } + + return job, nil +} diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/nfsrestore/nfsrestore.go b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsrestore/nfsrestore.go new file mode 100644 index 0000000000..51908c3092 --- /dev/null +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsrestore/nfsrestore.go @@ -0,0 +1,296 @@ +package nfsrestore + +import ( + "fmt" + "strings" + + storkapi "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" + "github.com/portworx/kdmp/pkg/drivers" + 
"github.com/portworx/kdmp/pkg/drivers/utils" + "github.com/portworx/sched-ops/k8s/batch" + "github.com/portworx/sched-ops/k8s/kdmp" + storkops "github.com/portworx/sched-ops/k8s/stork" + + "github.com/sirupsen/logrus" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Driver is a nfsbackup implementation of the data export interface. +type Driver struct{} + +// Name returns a name of the driver. +func (d Driver) Name() string { + return drivers.NFSRestore +} + +// StartJob creates a job for data transfer between volumes. +func (d Driver) StartJob(opts ...drivers.JobOption) (id string, err error) { + // FOr every ns to be backed up a new job should be created + funct := "NfsStartJob" + logrus.Infof("Inside function %s", funct) + o := drivers.JobOpts{} + for _, opt := range opts { + if opt != nil { + if err := opt(&o); err != nil { + return "", err + } + } + } + + job, err := buildJob(o) + if err != nil { + return "", err + } + // Create PV & PVC only in case of NFS. + if o.NfsServer != "" { + err := utils.CreateNFSPvPvcForJob(o.RestoreExportName, job.ObjectMeta.Namespace, o) + if err != nil { + return "", err + } + } + + if _, err = batch.Instance().CreateJob(job); err != nil && !apierrors.IsAlreadyExists(err) { + errMsg := fmt.Sprintf("creation of restore job %s failed: %v", o.RestoreExportName, err) + logrus.Errorf("%s: %v", funct, errMsg) + return "", fmt.Errorf(errMsg) + } + + return utils.NamespacedName(job.Namespace, job.Name), nil +} + +// DeleteJob stops data transfer between volumes. +func (d Driver) DeleteJob(id string) error { + + return nil +} + +// JobStatus returns a progress status for a data transfer. 
+func (d Driver) JobStatus(id string) (*drivers.JobStatus, error) { + fn := "JobStatus" + namespace, name, err := utils.ParseJobID(id) + if err != nil { + return utils.ToNFSJobStatus(err.Error(), batchv1.JobConditionType("")), nil + } + + job, err := batch.Instance().GetJob(name, namespace) + if err != nil { + errMsg := fmt.Sprintf("failed to fetch restore %s/%s job: %v", namespace, name, err) + logrus.Errorf("%s: %v", fn, errMsg) + return nil, fmt.Errorf(errMsg) + } + var jobStatus batchv1.JobConditionType + if len(job.Status.Conditions) != 0 { + jobStatus = job.Status.Conditions[0].Type + + } + err = utils.JobNodeExists(job) + if err != nil { + errMsg := fmt.Sprintf("failed to fetch the node info tied to the job %s/%s: %v", namespace, name, err) + logrus.Errorf("%s: %v", fn, errMsg) + return nil, fmt.Errorf(errMsg) + } + jobErr, nodeErr := utils.IsJobOrNodeFailed(job) + + var errMsg string + if jobErr { + errMsg = fmt.Sprintf("check %s/%s job for details: %s", namespace, name, drivers.ErrJobFailed) + return utils.ToNFSJobStatus(errMsg, jobStatus), nil + } + if nodeErr { + errMsg = fmt.Sprintf("Node [%v] on which job [%v/%v] schedules is NotReady", job.Spec.Template.Spec.NodeName, namespace, name) + return utils.ToNFSJobStatus(errMsg, jobStatus), nil + } + + res, err := kdmp.Instance().GetResourceBackup(name, namespace) + if err != nil { + if apierrors.IsNotFound(err) && utils.IsJobPending(job) { + logrus.Warnf("restore job %s is in pending state", job.Name) + return utils.ToNFSJobStatus(err.Error(), jobStatus), nil + } + // res is nil on any other error; surface it instead of dereferencing res below. + return nil, err + } + logrus.Tracef("%s jobStatus:%v", fn, jobStatus) + return utils.ToNFSJobStatus(res.Status.Reason, jobStatus), nil +} + +func buildJob( + jobOptions drivers.JobOpts, +) (*batchv1.Job, error) { + funct := "NfsbuildJob" + // Setup service account using same role permission as stork role + logrus.Infof("Inside %s function", funct) + if err := utils.SetupNFSServiceAccount(jobOptions.RestoreExportName, jobOptions.Namespace, roleFor()); err != nil { + errMsg := fmt.Sprintf("error creating service account %s/%s: %v", jobOptions.Namespace, jobOptions.RestoreExportName, err) + logrus.Errorf("%s: %v", funct, errMsg) + return nil, fmt.Errorf(errMsg) + } + + resources, err := utils.NFSResourceRequirements(jobOptions.JobConfigMap, jobOptions.JobConfigMapNs) + if err != nil { + return nil, err + } + job, err := jobForRestoreResource(jobOptions, resources) + if err != nil { + errMsg := fmt.Sprintf("building resource restore job %s failed: %v", jobOptions.RestoreExportName, err) + logrus.Errorf("%s: %v", funct, errMsg) + return nil, fmt.Errorf(errMsg) + } + + return job, nil +} + +func roleFor() *rbacv1.ClusterRole { + role := &rbacv1.ClusterRole{ + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"*"}, + Resources: []string{"*"}, + Verbs: []string{rbacv1.VerbAll}, + }, + }, + } + + return role +} + +func addJobLabels(labels map[string]string) map[string]string { + if labels == nil { + labels = make(map[string]string) + } + + labels[drivers.DriverNameLabel] = drivers.NFSRestore + return labels +} + +func jobForRestoreResource( + jobOption drivers.JobOpts, + resources corev1.ResourceRequirements, +) (*batchv1.Job, error) { + funct := "jobForRestoreResource" + // Read the ApplicationRestore stage and decide which restore operation to perform + restoreCR, err := storkops.Instance().GetApplicationRestore(jobOption.AppCRName, jobOption.AppCRNamespace) + if err != nil { + logrus.Errorf("%s: Error getting restore cr[%v/%v]: %v", funct, jobOption.AppCRNamespace, 
jobOption.AppCRName, err) + return nil, err + } + var opType string + switch restoreCR.Status.Stage { + case storkapi.ApplicationRestoreStageVolumes: + opType = "restore-vol" + case storkapi.ApplicationRestoreStageApplications: + opType = "restore" + default: + errMsg := fmt.Sprintf("invalid stage %v in applicationRestore CR[%v/%v]:", + restoreCR.Status.Stage, jobOption.AppCRNamespace, jobOption.AppCRName) + logrus.Errorf("%v", errMsg) + return nil, fmt.Errorf(errMsg) + } + + cmd := strings.Join([]string{ + "/nfsexecutor", + opType, + "--app-cr-name", + jobOption.AppCRName, + "--restore-namespace", + jobOption.AppCRNamespace, + // resourcebackup CR name + "--rb-cr-name", + jobOption.ResoureBackupName, + // resourcebackup CR namespace + "--rb-cr-namespace", + jobOption.ResoureBackupNamespace, + }, " ") + + labels := addJobLabels(jobOption.Labels) + + nfsExecutorImage, _, err := utils.GetExecutorImageAndSecret(drivers.NfsExecutorImage, + jobOption.NfsImageExecutorSource, + jobOption.NfsImageExecutorSourceNs, + jobOption.JobName, + jobOption) + if err != nil { + logrus.Errorf("failed to get the executor image details") + return nil, fmt.Errorf("failed to get the executor image details for job %s", jobOption.JobName) + } + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: jobOption.RestoreExportName, + Namespace: jobOption.Namespace, + Annotations: map[string]string{ + utils.SkipResourceAnnotation: "true", + }, + Labels: labels, + }, + Spec: batchv1.JobSpec{ + BackoffLimit: &utils.JobPodBackOffLimit, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyOnFailure, + ImagePullSecrets: utils.ToImagePullSecret(utils.GetImageSecretName(jobOption.RestoreExportName)), + ServiceAccountName: jobOption.RestoreExportName, + Containers: []corev1.Container{ + { + Name: drivers.NfsExecutorImage, + Image: nfsExecutorImage, + ImagePullPolicy: corev1.PullAlways, + Command: []string{ + "/bin/sh", + "-x", + "-c", + cmd, + }, + Resources: resources, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "cred-secret", + MountPath: drivers.KopiaCredSecretMount, + ReadOnly: true, + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "cred-secret", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: utils.GetCredSecretName(jobOption.RestoreExportName), + }, + }, + }, + }, + }, + }, + }, + } + if len(jobOption.NfsServer) != 0 { + volumeMount := corev1.VolumeMount{ + Name: utils.NfsVolumeName, + MountPath: drivers.NfsMount, + } + job.Spec.Template.Spec.Containers[0].VolumeMounts = append( + job.Spec.Template.Spec.Containers[0].VolumeMounts, + volumeMount, + ) + volume := corev1.Volume{ + Name: utils.NfsVolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: utils.GetPvcNameForJob(jobOption.RestoreExportName), + }, + }, + } + + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, volume) + } + + return job, nil +} diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/options.go b/vendor/github.com/portworx/kdmp/pkg/drivers/options.go index e5041b48db..ec7ed018a7 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/options.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/options.go @@ -44,7 +44,112 @@ type JobOpts struct { JobConfigMapNs string KopiaImageExecutorSource string KopiaImageExecutorSourceNs string + NfsImageExecutorSource string + NfsImageExecutorSourceNs 
string NodeAffinity map[string]string + NfsServer string + NfsMountOption string + NfsSubPath string + NfsExportDir string + RestoreExportName string + AppCRName string + AppCRNamespace string + ResoureBackupName string + ResoureBackupNamespace string +} + +// WithResoureBackupName is a job parameter +func WithResoureBackupName(name string) JobOption { + return func(opts *JobOpts) error { + opts.ResoureBackupName = strings.TrimSpace(name) + return nil + } +} + +// WithResoureBackupNamespace is a job parameter +func WithResoureBackupNamespace(namespace string) JobOption { + return func(opts *JobOpts) error { + opts.ResoureBackupNamespace = strings.TrimSpace(namespace) + return nil + } +} + +// WithAppCRName is a job parameter +func WithAppCRName(name string) JobOption { + return func(opts *JobOpts) error { + opts.AppCRName = strings.TrimSpace(name) + return nil + } +} + +// WithAppCRNamespace is a job parameter +func WithAppCRNamespace(namespace string) JobOption { + return func(opts *JobOpts) error { + opts.AppCRNamespace = strings.TrimSpace(namespace) + return nil + } +} + +// WithRestoreExport is a job parameter +func WithRestoreExport(name string) JobOption { + return func(opts *JobOpts) error { + opts.RestoreExportName = strings.TrimSpace(name) + return nil + } +} + +// WithNfsServer is a job parameter. +func WithNfsServer(server string) JobOption { + return func(opts *JobOpts) error { + opts.NfsServer = strings.TrimSpace(server) + return nil + } +} + +// WithNfsMountOption is a job parameter. +func WithNfsMountOption(mountOption string) JobOption { + return func(opts *JobOpts) error { + opts.NfsMountOption = strings.TrimSpace(mountOption) + return nil + } +} + +// WithNfsSubPath is a job parameter. +func WithNfsSubPath(subPath string) JobOption { + return func(opts *JobOpts) error { + opts.NfsSubPath = strings.TrimSpace(subPath) + return nil + } +} + +// WithNfsExportDir is a job parameter. +func WithNfsExportDir(exportDir string) JobOption { + return func(opts *JobOpts) error { + opts.NfsExportDir = strings.TrimSpace(exportDir) + return nil + } +} + +// WithNfsImageExecutorSource is a job parameter. +func WithNfsImageExecutorSource(source string) JobOption { + return func(opts *JobOpts) error { + if strings.TrimSpace(source) == "" { + return fmt.Errorf("nfs image executor source should be set") + } + opts.NfsImageExecutorSource = strings.TrimSpace(source) + return nil + } +} + +// WithNfsImageExecutorSourceNs is a job parameter. +func WithNfsImageExecutorSourceNs(namespace string) JobOption { + return func(opts *JobOpts) error { + if strings.TrimSpace(namespace) == "" { + return fmt.Errorf("nfs image executor source namespace should be set") + } + opts.NfsImageExecutorSourceNs = strings.TrimSpace(namespace) + return nil + } } // WithKopiaImageExecutorSource is job parameter. 
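A hedged sketch of how these functional options compose (the values are made-up examples): each driver's StartJob iterates over the supplied JobOption closures, and each closure trims and, where required, validates its input before recording it on a shared JobOpts value.

    o := drivers.JobOpts{}
    opts := []drivers.JobOption{
        drivers.WithRestoreExport("nfs-restore-1"),
        drivers.WithNfsServer("192.168.1.10"),
        drivers.WithNfsExportDir("/exports/px-backup"),
        drivers.WithNfsImageExecutorSource("stork"),
        drivers.WithNfsImageExecutorSourceNs("kube-system"),
    }
    for _, opt := range opts {
        if err := opt(&o); err != nil {
            return err // e.g. an empty executor image source is rejected
        }
    }

This mirrors the option loop at the top of each driver's StartJob in this patch.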
diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/common.go b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/common.go index cb564524e2..f55cf5ca9f 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/common.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/common.go @@ -26,9 +26,13 @@ const ( // BackupObjectUIDKey - label key to store backup object uid BackupObjectUIDKey = "backup-object-uid" // TLSCertMountVol mount vol name for tls certificate secret - TLSCertMountVol = "tls-secret" - defaultTimeout = 1 * time.Minute - progressCheckInterval = 5 * time.Second + TLSCertMountVol = "tls-secret" + // NfsVolumeName is the Volume spec's name to be used in the NFS-backed job specs + NfsVolumeName = "nfs-target" + // DefaultTimeout is the default timeout for task retries + DefaultTimeout = 1 * time.Minute + // ProgressCheckInterval is the regular interval at which a task retries + ProgressCheckInterval = 5 * time.Second // KdmpConfigmapName kdmp config map name KdmpConfigmapName = "kdmp-config" // KdmpConfigmapNamespace kdmp config map ns @@ -74,7 +78,7 @@ func SetupServiceAccount(name, namespace string, role *rbacv1.Role) error { } return "", false, nil } - if _, err := task.DoRetryWithTimeout(t, defaultTimeout, progressCheckInterval); err != nil { + if _, err := task.DoRetryWithTimeout(t, DefaultTimeout, ProgressCheckInterval); err != nil { errMsg := fmt.Sprintf("max retries done, failed in fetching secret token of sa [%v/%v]: %v ", name, namespace, err) logrus.Errorf("%v", errMsg) // Exhausted all retries @@ -108,6 +112,64 @@ func CleanServiceAccount(name, namespace string) error { return nil } +// SetupNFSServiceAccount creates a service account and binds it to the provided cluster role. +func SetupNFSServiceAccount(name, namespace string, role *rbacv1.ClusterRole) error { + if role != nil { + role.Name, role.Namespace = name, namespace + role.Annotations = map[string]string{ + SkipResourceAnnotation: "true", + } + if _, err := rbacops.Instance().CreateClusterRole(role); err != nil && !errors.IsAlreadyExists(err) { + return fmt.Errorf("create %s/%s cluster role: %s", namespace, name, err) + } + if _, err := rbacops.Instance().CreateClusterRoleBinding(clusterRoleBindingFor(name, namespace)); err != nil && !errors.IsAlreadyExists(err) { + return fmt.Errorf("create %s/%s cluster rolebinding: %s", namespace, name, err) + } + } + var sa *corev1.ServiceAccount + var err error + if sa, err = coreops.Instance().CreateServiceAccount(serviceAccountFor(name, namespace)); err != nil && !errors.IsAlreadyExists(err) { + return fmt.Errorf("create %s/%s serviceaccount: %s", namespace, name, err) + } + var errMsg error + t := func() (interface{}, bool, error) { + sa, err = coreops.Instance().GetServiceAccount(name, namespace) + if err != nil { + errMsg = fmt.Errorf("failed fetching sa [%v/%v]: %v", name, namespace, err) + logrus.Errorf("%v", errMsg) + return "", true, fmt.Errorf("%v", errMsg) + } + if sa.Secrets == nil { + logrus.Infof("Service account secret token is not yet populated") + errMsg = fmt.Errorf("secret token is missing in sa [%v/%v]", name, namespace) + return "", true, fmt.Errorf("%v", errMsg) + } + return "", false, nil + } + if _, err := task.DoRetryWithTimeout(t, DefaultTimeout, ProgressCheckInterval); err != nil { + eMsg := fmt.Errorf("max retries done, failed in fetching secret token of sa [%v/%v]: %v ", name, namespace, errMsg) + logrus.Errorf("%v", eMsg) + // Exhausted all retries + return eMsg + } + + tokenName := sa.Secrets[0].Name + secretToken, err := coreops.Instance().GetSecret(tokenName, namespace) + if 
err != nil { + errMsg := fmt.Errorf("failed in getting secretToken [%v] of service account [%v/%v]: %v", tokenName, name, namespace, err) + logrus.Errorf("%v", errMsg) + return errMsg + } + secretToken.Annotations[SkipResourceAnnotation] = "true" + _, err = coreops.Instance().UpdateSecret(secretToken) + if err != nil { + errMsg := fmt.Errorf("failed in updating the secretToken [%v] of service account [%v/%v]: %v", tokenName, name, namespace, err) + logrus.Errorf("%v", errMsg) + return errMsg + } + return nil +} + func roleBindingFor(name, namespace string) *rbacv1.RoleBinding { return &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ @@ -132,6 +194,29 @@ func roleBindingFor(name, namespace string) *rbacv1.RoleBinding { } } +func clusterRoleBindingFor(name, namespace string) *rbacv1.ClusterRoleBinding { + return &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Annotations: map[string]string{ + SkipResourceAnnotation: "true", + }, + }, + Subjects: []rbacv1.Subject{ + { + Kind: rbacv1.ServiceAccountKind, + Name: name, + Namespace: namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + Name: name, + Kind: "ClusterRole", + APIGroup: rbacv1.GroupName, + }, + } +} + func serviceAccountFor(name, namespace string) *corev1.ServiceAccount { return &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go index 2ba0f708ae..a0c856811a 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go @@ -4,9 +4,13 @@ import ( "errors" "fmt" "os" + "strconv" "strings" + "time" "github.com/aquilax/truncate" + storkapi "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" + "github.com/libopenstorage/stork/pkg/k8sutils" "github.com/portworx/kdmp/pkg/drivers" "github.com/portworx/kdmp/pkg/version" "github.com/portworx/sched-ops/k8s/apps" @@ -19,17 +23,23 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/wait" ) const ( defaultPXNamespace = "kube-system" - kdmpConfig = "kdmp-config" + // KdmpConfig defines the config map name of kdmp module + KdmpConfig = "kdmp-config" // TriggeredFromStork - denotes the kopia job is triggered from stork module TriggeredFromStork = "stork" // TriggeredFromPxBackup - denotes the kopia job is triggered from px-backup module TriggeredFromPxBackup = "px-backup" kopiaExecutorImageRegistryEnvVar = "KOPIA-EXECUTOR-IMAGE-REGISTRY" kopiaExecutorImageRegistrySecretEnvVar = "KOPIA-EXECUTOR-IMAGE-REGISTRY-SECRET" + // NfsExecutorImageRegistryEnvVar is the os environment variable for the nfs executor image registry + NfsExecutorImageRegistryEnvVar = "NFS-EXECUTOR-IMAGE-REGISTRY" + // NfsExecutorImageRegistrySecretEnvVar is the os environment variable for the nfs executor image registry secret + NfsExecutorImageRegistrySecretEnvVar = "NFS-EXECUTOR-IMAGE-REGISTRY-SECRET" // AdminNamespace - kube-system namespace, where privilige pods will be deployed for live kopiabackup. AdminNamespace = "kube-system" imageSecretPrefix = "image-secret" @@ -39,6 +49,18 @@ const ( ImageSecret = "image-secret" // CertSecret - cert secret prefix CertSecret = "cert-secret" + // ResourceCleanupKey - this key enables or disables the resource cleanup process. 
+ ResourceCleanupKey = "RESOURCE_CLEANUP" + // ResourceCleanupDefaultValue is "true", as the resource cleanup process is enabled by default; for debugging, users can set it to "false". + ResourceCleanupDefaultValue = "true" + volumeinitialDelay = 2 * time.Second + volumeFactor = 1.5 + volumeSteps = 15 + nfsVolumeSize = "10Gi" + // ResourceUploadSuccessMsg - resource upload success message + ResourceUploadSuccessMsg = "upload resource Successfully" + // PvcBoundSuccessMsg - pvc bound success message + PvcBoundSuccessMsg = "pvc bounded successfully" ) var ( @@ -47,6 +69,11 @@ var ( // ErrJobAlreadyRunning - Already a job is running for the given instance of PVC ErrJobAlreadyRunning = errors.New("job Already Running") ) +var volumeAPICallBackoff = wait.Backoff{ + Duration: volumeinitialDelay, + Factor: volumeFactor, + Steps: volumeSteps, +} // NamespacedName returns a name in form "/". func NamespacedName(namespace, name string) string { @@ -207,6 +234,34 @@ func ToJobStatus(progress float64, errMsg string, jobStatus batchv1.JobCondition } } +// ToNFSJobStatus returns a job status for provided parameters. +func ToNFSJobStatus(errMsg string, jobStatus batchv1.JobConditionType) *drivers.JobStatus { + // Note: these messages have to match the messages set by the executor when + // the job is successful + // TODO: Need a better logical way to notify job completion; this + // hard coding of the messages doesn't look good + if errMsg == ResourceUploadSuccessMsg || + errMsg == PvcBoundSuccessMsg { + return &drivers.JobStatus{ + State: drivers.JobStateCompleted, + Reason: errMsg, + Status: jobStatus, + } + } + if len(errMsg) > 0 { + return &drivers.JobStatus{ + State: drivers.JobStateFailed, + Reason: errMsg, + Status: jobStatus, + } + } + + return &drivers.JobStatus{ + State: drivers.JobStateInProgress, + Status: jobStatus, + } +} + // GetConfigValue read configmap and return the value of the requested parameter // If error in reading from configmap, we try reading from env variable func GetConfigValue(cm, ns, key string) string { @@ -215,13 +270,30 @@ func GetConfigValue(cm, ns, key string) string { ns, ) if err != nil { - logrus.Warnf("Failed in getting value for key [%v] from configmap[%v]", key, kdmpConfig) + logrus.Warnf("Failed in getting value for key [%v] from configmap[%v]", key, KdmpConfig) // try reading from the Env variable return os.Getenv(key) } return configMap.Data[key] } +// DoCleanupResource returns whether to cleanup the CRs & other resources. +func DoCleanupResource() (bool, error) { + doCleanup := true + cleanupResourceVal, err := k8sutils.GetConfigValue(KdmpConfig, defaultPXNamespace, ResourceCleanupKey) + if err != nil { + logrus.Errorf("Failed to get %s key from kdmp-config-map: %v", ResourceCleanupKey, err) + return true, err + } + if cleanupResourceVal != "" { + doCleanup, err = strconv.ParseBool(cleanupResourceVal) + if err != nil { + return true, err + } + } + return doCleanup, nil +} + // ResticExecutorImage returns a docker image that contains resticexecutor binary. 
func ResticExecutorImage() string { if customImage := strings.TrimSpace(os.Getenv(drivers.ResticExecutorImageKey)); customImage != "" { @@ -255,7 +327,60 @@ func GetImageRegistryFromDeployment(name, namespace string) (string, string, err return registry, "", nil } +// GetExecutorImageAndSecret returns the image name and secret to use in the job pod +func GetExecutorImageAndSecret(executorImageType, deploymentName, deploymentNs, + jobName string, jobOption drivers.JobOpts) (string, string, error) { + var imageRegistry, imageRegistrySecret string + var err error + if executorImageType == drivers.KopiaExecutorImage { + if len(os.Getenv(kopiaExecutorImageRegistryEnvVar)) != 0 { + imageRegistry = os.Getenv(kopiaExecutorImageRegistryEnvVar) + imageRegistrySecret = os.Getenv(kopiaExecutorImageRegistrySecretEnvVar) + } + } else if executorImageType == drivers.NfsExecutorImage { + if len(os.Getenv(NfsExecutorImageRegistryEnvVar)) != 0 { + imageRegistry = os.Getenv(NfsExecutorImageRegistryEnvVar) + imageRegistrySecret = os.Getenv(NfsExecutorImageRegistrySecretEnvVar) + } + } + // If we still didn't get the image registry from the environment variables, fall back to the deployment spec. + if imageRegistry == "" { + imageRegistry, imageRegistrySecret, err = GetImageRegistryFromDeployment(deploymentName, deploymentNs) + if err != nil { + logrus.Errorf("GetExecutorImageAndSecret: error in getting image registry from %v/%v deployment", deploymentNs, deploymentName) + return "", "", err + } + } + if len(imageRegistrySecret) != 0 { + // Copy the registry secret from the namespace the registry was resolved in. + err = CreateImageRegistrySecret(imageRegistrySecret, jobName, deploymentNs, jobOption.Namespace) + if err != nil { + return "", "", err + } + } + // TODO Need to be optimized.. too many if else .. :-) + var executorImage string + if len(imageRegistry) != 0 { + if executorImageType == drivers.KopiaExecutorImage { + executorImage = fmt.Sprintf("%s/%s", imageRegistry, GetKopiaExecutorImageName()) + } else if executorImageType == drivers.NfsExecutorImage { + executorImage = fmt.Sprintf("%s/%s", imageRegistry, GetNfsExecutorImageName()) + } + } else { + if executorImageType == drivers.KopiaExecutorImage { + executorImage = GetKopiaExecutorImageName() + } else if executorImageType == drivers.NfsExecutorImage { + executorImage = GetNfsExecutorImageName() + } + } + logrus.Infof("The returned image and secret are %v %v", executorImage, imageRegistrySecret) + return executorImage, imageRegistrySecret, nil +} + // GetKopiaExecutorImageRegistryAndSecret - will return the kopia image registry and image secret +// TODO: This is a duplicate method of GetExecutorImageAndSecret(), +// but in CSI_snapshotter code we don't have jobOption passed, hence we are keeping this intact for now +// because anyway we are deferring changes to CSI to a later point in time. At that time we will remove this +// function by passing "nil" to jobOption from the csi snapshotter path. 
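+// For reference, the drivers in this patch invoke the newer helper as: +// +// image, secret, err := GetExecutorImageAndSecret(drivers.NfsExecutorImage, +// jobOption.NfsImageExecutorSource, jobOption.NfsImageExecutorSourceNs, +// jobName, jobOption) 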
func GetKopiaExecutorImageRegistryAndSecret(source, sourceNs string) (string, string, error) { var registry, registrySecret string var err error @@ -273,11 +398,57 @@ } +// CreateNfsSecret creates the NFS secret which will be mounted by the job pod and accessed accordingly +func CreateNfsSecret(secretName string, backupLocation *storkapi.BackupLocation, namespace string, labels map[string]string) error { + credentialData := make(map[string][]byte) + credentialData["type"] = []byte(backupLocation.Location.Type) + credentialData["serverAddr"] = []byte(backupLocation.Location.NfsConfig.ServerAddr) + credentialData["password"] = []byte(backupLocation.Location.RepositoryPassword) + credentialData["path"] = []byte(backupLocation.Location.Path) + credentialData["subPath"] = []byte(backupLocation.Location.NfsConfig.SubPath) + + err := CreateJobSecret(secretName, namespace, credentialData, labels) + + return err +} + +// CreateJobSecret creates a job secret resource in k8s +func CreateJobSecret( + secretName string, + namespace string, + credentialData map[string][]byte, + labels map[string]string, +) error { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: namespace, + Labels: labels, + Annotations: map[string]string{ + SkipResourceAnnotation: "true", + }, + }, + Data: credentialData, + Type: corev1.SecretTypeOpaque, + } + _, err := core.Instance().CreateSecret(secret) + if err != nil && apierrors.IsAlreadyExists(err) { + return nil + } + + return err } + // GetKopiaExecutorImageName - will return the default kopia executor image func GetKopiaExecutorImageName() string { return strings.Join([]string{drivers.KopiaExecutorImage, version.Get().GitVersion}, ":") } +// GetNfsExecutorImageName - will return the default nfs executor image +func GetNfsExecutorImageName() string { + return strings.Join([]string{drivers.NfsExecutorImage, version.Get().GitVersion}, ":") +} + // RsyncImage returns a docker image that contains rsync binary. func RsyncImage() string { if customImage := strings.TrimSpace(os.Getenv(drivers.RsyncImageKey)); customImage != "" { @@ -339,6 +510,31 @@ func KopiaResourceRequirements(configMap, ns string) (corev1.ResourceRequirement return toResourceRequirements(requestCPU, requestMem, limitCPU, limitMem) } +// NFSResourceRequirements returns ResourceRequirements for the nfsexecutor container. +func NFSResourceRequirements(configMap, ns string) (corev1.ResourceRequirements, error) { + requestCPU := strings.TrimSpace(GetConfigValue(configMap, ns, drivers.NFSExecutorRequestCPU)) + if requestCPU == "" { + requestCPU = drivers.DefaultNFSExecutorRequestCPU + } + + requestMem := strings.TrimSpace(GetConfigValue(configMap, ns, drivers.NFSExecutorRequestMemory)) + if requestMem == "" { + requestMem = drivers.DefaultNFSExecutorRequestMemory + } + + limitCPU := strings.TrimSpace(GetConfigValue(configMap, ns, drivers.NFSExecutorLimitCPU)) + if limitCPU == "" { + limitCPU = drivers.DefaultNFSExecutorLimitCPU + } + + limitMem := strings.TrimSpace(GetConfigValue(configMap, ns, drivers.NFSExecutorLimitMemory)) + if limitMem == "" { + limitMem = drivers.DefaultNFSExecutorLimitMemory + } + + return toResourceRequirements(requestCPU, requestMem, limitCPU, limitMem) +} + // ResticResourceRequirements returns JobResourceRequirements for the executor container. 
func ResticResourceRequirements() (corev1.ResourceRequirements, error) { requestCPU := drivers.DefaultResticExecutorRequestCPU @@ -451,17 +647,190 @@ func CreateImageRegistrySecret(sourceName, destName, sourceNamespace, destNamesp return nil } -//GetCredSecretName - get credential secret name +// GetCredSecretName - get credential secret name func GetCredSecretName(name string) string { return CredSecret + "-" + name } -//GetImageSecretName - get image secret name +// GetImageSecretName - get image secret name func GetImageSecretName(name string) string { return ImageSecret + "-" + name } -//GetCertSecretName - get cert secret name +// GetCertSecretName - get cert secret name func GetCertSecretName(name string) string { return CertSecret + "-" + name } + +// CreateNfsPv - Create a persistent volume for NFS specific jobs +func CreateNfsPv(pvName string, + nfsServerAddr string, + nfsExportDir string, + nfsMountOption string) error { + + fn := "CreateNfsPv" + // Let's Create PV & PVC before creating JOB + pv := &corev1.PersistentVolume{ + TypeMeta: metav1.TypeMeta{Kind: "PersistentVolume"}, + ObjectMeta: metav1.ObjectMeta{ + Name: pvName, + Annotations: map[string]string{ + SkipResourceAnnotation: "true", + }, + }, + Spec: corev1.PersistentVolumeSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + "ReadWriteMany", + }, + Capacity: corev1.ResourceList{ + corev1.ResourceName(corev1.ResourceStorage): resource.MustParse(nfsVolumeSize), + }, + PersistentVolumeSource: corev1.PersistentVolumeSource{ + NFS: &corev1.NFSVolumeSource{ + Server: nfsServerAddr, + Path: nfsExportDir, + ReadOnly: false, + }, + }, + MountOptions: []string{nfsMountOption}, + }, + } + + if _, err := core.Instance().CreatePersistentVolume(pv); err != nil && !apierrors.IsAlreadyExists(err) { + errMsg := fmt.Sprintf("creation of pv name [%s] failed: %v", pvName, err) + logrus.Errorf("%s: %v", fn, errMsg) + return fmt.Errorf(errMsg) + } + + // wait for pv to be available + _, err := WaitForPVAvailable(pvName) + if err != nil { + return err + } + + return nil +} + +// CreateNfsPvc - Create a persistent volume claim for NFS specific jobs +func CreateNfsPvc(pvcName string, pvName string, namespace string) error { + fn := "CreateNfsPvc" + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: pvcName, + Namespace: namespace, + Annotations: map[string]string{ + SkipResourceAnnotation: "true", + }, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceName(corev1.ResourceStorage): resource.MustParse(nfsVolumeSize), + }, + }, + VolumeName: pvName, + }, + } + + _, err := core.Instance().CreatePersistentVolumeClaim(pvc) + if err != nil && !apierrors.IsAlreadyExists(err) { + errMsg := fmt.Sprintf("creation of pvc name [%s] failed: %v", pvcName, err) + logrus.Errorf("%s: %v", fn, errMsg) + return fmt.Errorf(errMsg) + } + + // wait for pvc to get bound + _, err = WaitForPVCBound(pvcName, namespace) + if err != nil { + return err + } + + return nil +} + +// CreateNFSPvPvcForJob - this function creates PV and PVC for NFS job. 
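+// It blocks until the PV reports Available or Bound and the PVC reaches the Bound +// phase (see WaitForPVAvailable and WaitForPVCBound below), so the job created +// afterwards can mount the claim immediately. 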
+func CreateNFSPvPvcForJob(jobName string, namespace string, o drivers.JobOpts) error { + // create PV before creating job + nfsPvName := GetPvNameForJob(jobName) + if err := CreateNfsPv(nfsPvName, o.NfsServer, o.NfsExportDir, o.NfsMountOption); err != nil { + return err + } + logrus.Debugf("Created NFS PV %s successfully", nfsPvName) + // create pvc before creating job + nfsPvcName := GetPvcNameForJob(jobName) + if err := CreateNfsPvc(nfsPvcName, nfsPvName, namespace); err != nil { + return err + } + logrus.Debugf("Created NFS PVC %s/%s successfully", namespace, nfsPvcName) + return nil +} + +// WaitForPVCBound - waits until the PVC reaches the Bound state; returns a timeout error otherwise. +func WaitForPVCBound(pvcName string, namespace string) (*corev1.PersistentVolumeClaim, error) { + if namespace == "" { + return nil, fmt.Errorf("namespace has to be set") + } + // wait for pvc to get bound + var pvc *corev1.PersistentVolumeClaim + var err error + var errMsg string + wErr := wait.ExponentialBackoff(volumeAPICallBackoff, func() (bool, error) { + pvc, err = core.Instance().GetPersistentVolumeClaim(pvcName, namespace) + if err != nil { + return false, err + } + + if pvc.Status.Phase != corev1.ClaimBound { + errMsg = fmt.Sprintf("nfs pvc status: expected %s, got %s for pvc %s/%s", corev1.ClaimBound, pvc.Status.Phase, namespace, pvcName) + logrus.Debugf("%v", errMsg) + return false, nil + } + + return true, nil + }) + + if wErr != nil { + logrus.Errorf("%v", wErr) + return nil, fmt.Errorf("%s: %s", wErr, errMsg) + } + return pvc, nil +} + +// WaitForPVAvailable - waits until the PV becomes Available (or Bound); returns a timeout error otherwise. +func WaitForPVAvailable(pvName string) (*corev1.PersistentVolume, error) { + // wait for pv to be available + var pv *corev1.PersistentVolume + var err error + var errMsg string + wErr := wait.ExponentialBackoff(volumeAPICallBackoff, func() (bool, error) { + pv, err = core.Instance().GetPersistentVolume(pvName) + if err != nil { + return false, err + } + // If the PV is in neither the Available nor the Bound state, keep waiting.
+ if !(pv.Status.Phase == corev1.VolumeAvailable || pv.Status.Phase == corev1.VolumeBound) { + errMsg = fmt.Sprintf("nfs pv [%v] status: expected %s, got %s", pvName, corev1.VolumeAvailable, pv.Status.Phase) + logrus.Debugf("%v", errMsg) + return false, nil + } + + return true, nil + }) + + if wErr != nil { + logrus.Errorf("%v: %v", wErr, errMsg) + return nil, fmt.Errorf("%s: %s", wErr, errMsg) + } + return pv, nil +} + +// GetPvcNameForJob - returns the PVC name for a job +func GetPvcNameForJob(jobName string) string { + return "pvc-" + jobName +} + +// GetPvNameForJob - returns the PV name for a job +func GetPvNameForJob(jobName string) string { + return "pv-" + jobName +} diff --git a/vendor/github.com/portworx/kdmp/pkg/jobratelimit/jobratelimit.go b/vendor/github.com/portworx/kdmp/pkg/jobratelimit/jobratelimit.go index abd9e11d43..eea8956a27 100644 --- a/vendor/github.com/portworx/kdmp/pkg/jobratelimit/jobratelimit.go +++ b/vendor/github.com/portworx/kdmp/pkg/jobratelimit/jobratelimit.go @@ -48,6 +48,8 @@ func getJobLimitConfigmapKey(driverName string) (string, error) { return RestoreJobLimitKey, nil case drivers.KopiaDelete: return DeleteJobLimitKey, nil + case drivers.NFSDelete: + return DeleteJobLimitKey, nil case drivers.KopiaMaintenance: return MaintenanceJobLimitKey, nil default: @@ -105,6 +107,8 @@ func getDefaultJobLimit(jobType string) int { return DefaultRestoreJobLimit case drivers.KopiaDelete: return DefaultDeleteJobLimit + case drivers.NFSDelete: + return DefaultDeleteJobLimit case drivers.KopiaMaintenance: return DefaultMaintenanceJobLimit default: diff --git a/vendor/modules.txt b/vendor/modules.txt index 0db8683a7a..626e6be897 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -691,13 +691,14 @@ github.com/pierrec/lz4/internal/xxh32 github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 github.com/pmezard/go-difflib/difflib -# github.com/portworx/kdmp v0.4.1-0.20220710173715-5d42efc7d149 +# github.com/portworx/kdmp v0.4.1-0.20221106080928-1fd01d5e6ed0 ## explicit github.com/portworx/kdmp/pkg/apis/kdmp github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1 github.com/portworx/kdmp/pkg/client/clientset/versioned github.com/portworx/kdmp/pkg/client/clientset/versioned/scheme github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1 +github.com/portworx/kdmp/pkg/controllers github.com/portworx/kdmp/pkg/controllers/dataexport github.com/portworx/kdmp/pkg/drivers github.com/portworx/kdmp/pkg/drivers/driversinstance @@ -705,6 +706,9 @@ github.com/portworx/kdmp/pkg/drivers/kopiabackup github.com/portworx/kdmp/pkg/drivers/kopiadelete github.com/portworx/kdmp/pkg/drivers/kopiamaintenance github.com/portworx/kdmp/pkg/drivers/kopiarestore +github.com/portworx/kdmp/pkg/drivers/nfsbackup +github.com/portworx/kdmp/pkg/drivers/nfsdelete +github.com/portworx/kdmp/pkg/drivers/nfsrestore github.com/portworx/kdmp/pkg/drivers/resticbackup github.com/portworx/kdmp/pkg/drivers/resticrestore github.com/portworx/kdmp/pkg/drivers/rsync @@ -731,7 +735,7 @@ github.com/portworx/px-object-controller/client/listers/objectservice/v1alpha1 github.com/portworx/px-object-controller/pkg/client github.com/portworx/px-object-controller/pkg/controller github.com/portworx/px-object-controller/pkg/utils -# github.com/portworx/sched-ops v1.20.4-rc1.0.20220401024625-dbc61a336f65 => github.com/portworx/sched-ops v1.20.4-rc1.0.20220922150029-c1d35df2436a +# github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 => github.com/portworx/sched-ops
v1.20.4-rc1.0.20220922150029-c1d35df2436a ## explicit github.com/portworx/sched-ops/k8s/admissionregistration github.com/portworx/sched-ops/k8s/apiextensions @@ -1628,7 +1632,7 @@ k8s.io/client-go/util/keyutil k8s.io/client-go/util/workqueue # k8s.io/cloud-provider v0.21.5 => k8s.io/cloud-provider v0.21.5 k8s.io/cloud-provider -# k8s.io/code-generator v0.21.5 => k8s.io/code-generator v0.21.5 +# k8s.io/code-generator v0.22.1 => k8s.io/code-generator v0.21.5 ## explicit k8s.io/code-generator k8s.io/code-generator/cmd/client-gen From 0b3868023f71d7fb4373317e65b2261ac629717a Mon Sep 17 00:00:00 2001 From: Lalatendu Das Date: Wed, 27 Jul 2022 06:48:28 +0000 Subject: [PATCH 55/97] pb-2939: enable NFS type backuplocation - added NFS type to stork v1alpha1 APIs - added objectlock specific API for NFS - skipped the bucket-exists check in the Application backup controller specifically for the NFS scenario Signed-off-by: Lalatendu Das --- how ed4bac3825565902733b151138c32367231e48b3 | 11072 ++++++++++++++++ .../controllers/applicationbackup.go | 18 +- pkg/objectstore/nfs/nfs.go | 13 + pkg/objectstore/objectstore.go | 3 + 4 files changed, 11100 insertions(+), 6 deletions(-) create mode 100644 how ed4bac3825565902733b151138c32367231e48b3 create mode 100644 pkg/objectstore/nfs/nfs.go diff --git a/how ed4bac3825565902733b151138c32367231e48b3 b/how ed4bac3825565902733b151138c32367231e48b3 new file mode 100644 index 0000000000..e824a16359 --- /dev/null +++ b/how ed4bac3825565902733b151138c32367231e48b3 @@ -0,0 +1,11072 @@ +commit ed4bac3825565902733b151138c32367231e48b3 (HEAD -> pb-2939, origin/pb-2939) +Author: Lalatendu Das +Date: Wed Jul 27 06:48:28 2022 +0000 + + pb-2939: enable NFS type backuplocation + + - added NFS type to stork v1alpha1 APIs + - added objectlock specific API for NFS + - skipped the bucket-exists check in the Application backup + controller specifically for the NFS scenario + + Signed-off-by: Lalatendu Das + +commit 207a503c45ae3f373f558b32a27b2a20e2199930 (origin/nfs-ea, origin/master, origin/HEAD, nfs-ea, master) +Author: sivakumar subraani +Date: Wed Aug 31 14:36:10 2022 +0000 + + pb-3005: Added fix to include a group's CRDs even if only one of them has a CR present. + + - With this fix, we will include all the CRDs of a group if one CRD + of that particular group has a CR in the given namespace.
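A minimal Go sketch of the pb-3005 grouping rule above, not the actual stork implementation; hasCRInNamespace is an assumed helper that checks whether a CRD has any CR instances in the namespace:

package sketch

import (
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
)

// crdsToInclude returns every CRD whose API group has at least one CRD
// with a CR in the namespace, so sibling CRDs of a group travel together.
func crdsToInclude(crds []apiextensionsv1.CustomResourceDefinition, ns string,
	hasCRInNamespace func(apiextensionsv1.CustomResourceDefinition, string) bool) []apiextensionsv1.CustomResourceDefinition {
	groupHasCR := map[string]bool{}
	for _, crd := range crds {
		if hasCRInNamespace(crd, ns) {
			groupHasCR[crd.Spec.Group] = true
		}
	}
	var included []apiextensionsv1.CustomResourceDefinition
	for _, crd := range crds {
		if groupHasCR[crd.Spec.Group] {
			included = append(included, crd)
		}
	}
	return included
}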
+ +commit fae173e9c7cecf9de0db8f1032a894b6a2b88084 +Author: Aditya Dani +Date: Tue Aug 30 09:36:57 2022 -0700 + + Log when stork takes a forceful snapshot + +commit 72bd1fe2809fa757332807c204eb7aee5b02e1cd +Author: Ram +Date: Mon Aug 29 18:21:28 2022 +0530 + + Force full backup on specified day in daily schedule + + Signed-off-by: Ram + +commit 25b400055100500a8b4bcf8fc2e21c533ee1487f +Author: Ram +Date: Sat Aug 27 14:57:33 2022 +0530 + + vendor update - sched-ops master + + Signed-off-by: Ram + +commit 0b54138f41d94eef252454d7f9ebd052a5ab4f59 +Author: Ram +Date: Sat Aug 27 14:57:05 2022 +0530 + + pwx-26151: skip collecting endpoints for headless service + + Signed-off-by: Ram + +commit 9782bc0f44250054346ac0d5089ce21da68d7fc8 +Author: Rohit-PX +Date: Mon Aug 15 22:26:01 2022 +0000 + + vendor updates + + Signed-off-by: Rohit-PX + +commit d630f1478d0ed353bb97b72990ba8f7e44d31b0b +Author: Ram +Date: Fri Aug 26 12:46:06 2022 +0530 + + Ensure to re-run transformation validation before each migration run + + Signed-off-by: Ram + +commit d3d4c9dd3d51f0e0b24f30f66739aa980f1eb49e +Author: Ram +Date: Wed Aug 24 20:27:59 2022 +0530 + + vendor update sched-ops + + Signed-off-by: Ram + +commit 5c6a98c625fba875466959c0c9f2b5899cf8fde0 +Author: Ram +Date: Wed Aug 24 20:27:04 2022 +0530 + + Dry-run resource transformation validation during migration prechecks + + - validate transform CR + - dry-run on newly detected object before starting migration + + Signed-off-by: Ram + +commit 338ee5d16d70740c1a65d6c667dc01789f2ed9a7 +Author: Ram +Date: Wed Aug 24 19:32:48 2022 +0530 + + Allow enable/disable resource transformation controller + + - addressed review comments + + Signed-off-by: Ram + +commit 0e2f00364d25c4d50908018cffa151782577c62b +Author: Ram +Date: Thu Aug 18 19:57:28 2022 +0530 + + vendor updates stork sched ops + + Signed-off-by: Ram + +commit 4307eea88180b29a575ebaa981e65e17e65f76ff +Author: Ram +Date: Thu Aug 18 19:56:24 2022 +0530 + + Add transformation rule handler in resourcecollector + + - allow dry run for keypair and slice value type + + Signed-off-by: Ram + +commit 71a23be2d835d72dd7085cd7cd8e9ab388e13955 +Author: Ram +Date: Thu Aug 18 19:54:48 2022 +0530 + + pwx-24979: integrate transform resource api with migration path + + - accept resource transformation in migration spec + - update resource as per transformation rule + + Signed-off-by: Ram + +commit 8e7b43a5ed1451e9bbf52ed7ded0c08c6066ae7c +Author: Ram +Date: Mon Aug 1 22:11:18 2022 +0530 + + PWX-24851: Enhance UX experience for setting up clusterpair for async-dr setups + + - query cluster pair token using px endpoint + port + - query port by looking at px-api service rest port + + Signed-off-by: Ram + +commit f7504fc6481ec72836f12b45cdba1f2d7200bf5e +Author: Ram +Date: Tue Aug 9 18:20:24 2022 +0530 + + Register and handle Resource Transformation events via controller + + - validate specs for resource transformation cr + - apply patch on unstruct k8s objects + - run patched resources on dry run namespace with DryRun option set to + all + + Signed-off-by: Ram + +commit d88159e3c4fb7280d2dcb9366f48663e40863dd6 +Author: Ram +Date: Mon Aug 1 15:14:30 2022 +0530 + + PWX-24976: Register ResourceTransformation CR api + + Signed-off-by: Ram + +commit 33f1d74c6b17890b6ab028b28ca45cc2e5ad715a +Author: Ram +Date: Mon Aug 1 14:41:51 2022 +0530 + + codegen generated file for resource transformation CR + + Signed-off-by: Ram + +commit 60d0c119d70efb14b7471869a8ec4073eb5bc1ab +Author: Ram +Date: Mon Aug 1 11:49:50 2022 +0530 + + PWX-26033: Don't
include FA/FB device for migration + + Signed-off-by: Ram + +commit eeb4e468b4a5b30428615ab57dc0388f1c21a254 +Author: Luke Pitstick +Date: Fri Aug 5 14:08:53 2022 -0600 + + hold off on clusterpair port changes + + Signed-off-by: Luke Pitstick + +commit e7f47e6ad9ba6887719c0eb75eb29427a8f22f10 +Author: Luke Pitstick +Date: Thu Aug 4 16:56:37 2022 -0600 + + Vendor update openstorage + + Signed-off-by: Luke Pitstick + +commit 4f0688cc42c91656ae13c1648bc99d039c4d5305 +Author: Luke Pitstick +Date: Tue Aug 2 19:31:51 2022 -0600 + + Deal with clusterpair ports later + + Signed-off-by: Luke Pitstick + +commit 3949621aec5bfbc197c9096d6f97a9c156fc5ea7 +Author: Luke Pitstick +Date: Fri Jul 29 14:50:54 2022 -0600 + + vendor openstorage + + Signed-off-by: Luke Pitstick + +commit 393d44c93c5d34d4e53fde9a048bd3c423413c2c +Author: Mudassir Latif +Date: Tue Apr 20 02:09:28 2021 +0000 + + Stork should use the new secure port + + If tls is enabled, use tls.config generated by the openstorage + library helper + + Signed-off-by: Mudassir Latif + +commit c83f1e7b3f5c0e00f916fc018ce5f81bf52f140b +Author: Priyanshu Pandey +Date: Wed Aug 17 00:09:57 2022 -0600 + + PWX-26330: Disable px-object-controller by default. + + Signed-off-by: Priyanshu Pandey + +commit 27c04a2ab7eee15bdd4f09dbee8c6eda66e4aad6 +Author: sivakumar subraani +Date: Sat Aug 6 13:02:05 2022 +0000 + + pb-3000: Added debug statement in GetObjLockInfo api + +commit 30cda4f7559413a7b355231291aa12de51c04ae9 +Author: Lalatendu Das +Date: Wed Aug 10 07:38:04 2022 +0000 + + pb-3002: call v1 version CRD API for k8s 1.22 or newer. + + Fixed a v1beta1 based getCRD call which will fail on k8s 1.22 or newer + because these APIs are removed from k8s 1.22 onwards. + + Signed-off-by: Lalatendu Das + +commit 09d16ab442dc1074ae7fcb8fb349976355f2d25c +Author: Priyanshu Pandey +Date: Wed Aug 10 13:43:30 2022 -0600 + + PWX-26225: Error in starting px-object-controller should not throw fatal error. + + Signed-off-by: Priyanshu Pandey + +commit 1fbe01b49d18a03b33708bd38260954fa1cf1a97 +Author: Priyanshu Pandey +Date: Thu Aug 4 18:22:26 2022 -0600 + + PWX-26049: Vendor updated px-object-controller to fix cache initialization, delete error and multitenancy. + + Signed-off-by: Priyanshu Pandey + +commit 914167074236981785d8877f228248bcaf8bdce3 +Author: Priyanshu Pandey +Date: Thu Jul 28 19:34:28 2022 -0600 + + PWX-24682: Fixing static check issues. + + Signed-off-by: Priyanshu Pandey + +commit 0961a8a9baf9e4db540022dea6dd92ecd5e03ac0 +Author: Priyanshu Pandey +Date: Thu Jul 28 15:18:45 2022 -0600 + + PWX-24682: Vendor px-object-controller and start it to use px sdk server. + + Signed-off-by: Priyanshu Pandey + +commit c55ae997e904d27186195ffc9aabb27621c89321 (nfs-feature-master-branch) +Author: sivakumar subraani +Date: Thu Jul 21 08:05:49 2022 +0000 + + pb-2279: Added fixes to take care of the new EncryptionV2Key variable in + backuplocation. + + - Replace references to EncryptionKey with EncryptionV2Key + - If the decrypt function fails, assume the data to be unencrypted and + try using it directly.
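A minimal Go sketch of the pb-2279 fallback described above, assuming the real decrypt helper lives elsewhere in stork and is passed in here:

package sketch

// readPossiblyEncrypted tries to decrypt with the provided key; on failure
// it assumes the payload was stored unencrypted and returns it unchanged,
// mirroring the pb-2279 behavior.
func readPossiblyEncrypted(data []byte, key string,
	decrypt func([]byte, string) ([]byte, error)) []byte {
	out, err := decrypt(data, key)
	if err != nil {
		return data // assume plain, unencrypted data, per pb-2279
	}
	return out
}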
+ +commit 114ffbf85f353c2e5ffac0e576ce9ebfd1e9352b +Author: sivakumar subraani +Date: Tue Jul 12 05:46:26 2022 +0000 + + pb-2279: Added new variable for encryption key in backuplocation CR definition + +commit a35d4636f8aa5c7fe5bf335eb339a77887f994a6 +Author: Rohit-PX +Date: Wed Jul 20 23:05:16 2022 +0000 + + Rename webhook tests to be picked as part of extender tests + + Signed-off-by: Rohit-PX + +commit 221afd7223485afd9ab6db39dad999a0efd50de3 +Author: Neelesh Thakur +Date: Fri Jul 15 19:06:12 2022 -0600 + + PTX-10293: added tests for the webhook changes for virt-launcher pod + + Tests to verify that the virt-launcher pod will return "nfs" as + the file system type for regular and encrypted PX volumes. + + We use "kubevirt.io: virt-launcher" label to simulate the virt-launcher pod. + + Also, verify that the pods without the virt-launcher label will return + the correct FS type depending on it is a bind-mount or an nfs-mount. + + Signed-off-by: Neelesh Thakur + +commit 5c3c9b63fa1d89a41c212aef032447a9e435f910 +Author: Aditya Dani +Date: Fri Jul 15 16:27:08 2022 -0700 + + Add missing rancher labels to Service spec in rancher + +commit dd549f812cdb5bc38627ac24c18468684e480215 (origin/master_nfs_upload) +Author: Ram +Date: Tue Jul 5 21:06:49 2022 +0530 + + vendor updates for torpedo,schedops libs + + Signed-off-by: Ram + +commit 5d079db193a9592e1d838d557adc4cf133ca2688 +Author: Ram +Date: Tue Jul 5 21:05:04 2022 +0530 + + integration test for migration of endpoints, networkpolicy resource + + Signed-off-by: Ram + +commit ef512411ebc0467cb3f2ae9b3f33ca81c3eef4c4 +Author: Rohit-PX +Date: Tue Jul 12 23:40:08 2022 +0000 + + Vendor updates + + Signed-off-by: Rohit-PX + +commit c4447251aa35ac43c3db09eadb504fa2fb3a3ff9 +Author: Rohit-PX +Date: Fri Jul 8 18:12:37 2022 +0000 + + Add CBT suite to be run for every check-in to stork + + Signed-off-by: Rohit-PX + +commit 3e79dc8074385c0c40973549f6eb666fb8f950f6 +Author: Neelesh Thakur +Date: Wed Jun 29 13:07:11 2022 -0600 + + PWX-24637: mutate the virt-launcher container to intercept statfs() + + Live migration of KubeVirt VM fails if the VM is using + a bind-mounted sharedv4 volume. The root cause is that libvirt + uses a statfs() call to check the file system type and + incorrectly concludes that the volume is not shared. + + This patch addresses this problem as described below. + + We use a shared library px_statfs.so that intercepts libvirt's statfs call. + If the input path is backed by a PX volume, we change the file system + type returned by the real statfs call. This shared library is bundled with + the stork container. + + If a virt-launcher pod is being created and is using a PX volume, stork's + mutating webhook creates a ConfigMap in the pod's namespace. + + This configMap has 2 keys that represent the 2 files that we want to inject + into the virt-launcher container's /etc directory: + + - ld.so.preload + - px_statfs.so + + The stork webhook then mutates the virt-launcher pod's spec to mount + the configMap as a volume and inject the 2 files above in /etc dir on the + container's file system. This makes linux load px_statfs.so in the libvirt + process that is running inside the virt-launcher container and intercept + libvirts' statfs() call. + + Signed-off-by: Neelesh Thakur + +commit a0abc0790f8c47a61c7d985e6050ae5402eb4b74 +Author: sivakumar subraani +Date: Mon Jul 11 09:45:20 2022 +0000 + + pb-2904: vendored latest kdmp repo changes. 
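A sketch of the kind of pod mutation the PWX-24637 webhook above performs; the volume name is an assumption, and the ConfigMap name comes from the webhook's own bookkeeping:

package sketch

import corev1 "k8s.io/api/core/v1"

// mountStatfsShim mounts the per-namespace ConfigMap so that its two keys
// appear as /etc/ld.so.preload and /etc/px_statfs.so inside the
// virt-launcher containers, making the dynamic linker preload the shim
// into the libvirt process.
func mountStatfsShim(pod *corev1.Pod, configMapName string) {
	pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{
		Name: "px-statfs", // assumed volume name
		VolumeSource: corev1.VolumeSource{
			ConfigMap: &corev1.ConfigMapVolumeSource{
				LocalObjectReference: corev1.LocalObjectReference{Name: configMapName},
			},
		},
	})
	for i := range pod.Spec.Containers {
		c := &pod.Spec.Containers[i]
		c.VolumeMounts = append(c.VolumeMounts,
			corev1.VolumeMount{Name: "px-statfs", MountPath: "/etc/ld.so.preload", SubPath: "ld.so.preload"},
			corev1.VolumeMount{Name: "px-statfs", MountPath: "/etc/px_statfs.so", SubPath: "px_statfs.so"},
		)
	}
}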
+ +commit 7cb8bb703e541c973d34dd9dec744b3cafac38b2 +Author: Ram +Date: Mon Jul 4 13:32:25 2022 +0530 + + Collect manually created endpoint resources for backup and migration + + Signed-off-by: Ram + +commit 02e473c7ac2d8735d3946db626811e92aef16bce +Author: Ram +Date: Fri Jul 1 09:46:49 2022 +0530 + + add support for endpoint object collection + + Signed-off-by: Ram + +commit 7bc6cb9cd0b1a5b3f619541ae6fe99d2f0a9b6e7 +Author: sivakumar subraani +Date: Sat Jul 9 03:48:16 2022 +0000 + + pb-2903: Added error handling for GetObjLockInfo api for cloudian + objectstore. + + - In the case of cloudian objectstore, the GetObjectLockConfiguration + api was returning ObjectLockConfigurationNotFound as an error. + - So added the error check to handle the ObjectLockConfigurationNotFound error as well + +commit 0dc4b5c3f9e51882f4aaa6422ed22c61db97db0e +Author: Aditya Dani +Date: Tue Jun 7 09:47:29 2022 -0700 + + PWX-24046: Add PlatformOptions to ClusterPair spec. + + PlatformOptions allow users to specify any configurations required for + kubernetes platform providers like Rancher / EKS / AKS etc. + + Currently the PlatformOptions are only being used for Rancher. Each platform can define their + own spec within this PlatformOptions. + + RancherSpec: + - ProjectMappings: Allows users to configure source and target cluster projectID mappings + so that stork can apply the correct annotations on the destination project where it + is migrating the k8s resources. + - RancherSecret (FUTURE): Proposal for specifying a kubernetes secret holding rancher api keys that would + be used to make REST API calls to the Rancher endpoint for creating/deleting/getting project details + + NetworkPolicy / All Affinity referencing objects - Deployment/StatefulSet etc + - Parse a pod spec and if NamespaceSelectors are set, check if there are any rancher project labels + and replace them with the target project ID mapping. + + Pass resource collector options in every resource collector API + + - The resource collector instance is a common instance used in backup + and migration controllers. A common instance cannot dictate the options + used for different migration or backup objects. + + - Instead of a global map on the resource collector options use an actual + Options object and pass it as an argument to every API. + + PWX-24046: Integration test fixes to support Rancher Project Mappings + + - Currently the tests will run on vanilla k8s clusters but the + specs are simulated as if they were created for a Rancher cluster by applying + project labels to them + +commit f2ecc587a5498c509539dd7a017a649d53af74df (origin/pb-2279-new) +Author: Ram +Date: Thu Jul 7 19:24:59 2022 +0530 + + Remove deprecated go lint check + + Signed-off-by: Ram + +commit 6f3d4011761594a84e3f807aff737b9631cca195 +Author: Ram +Date: Tue Jun 14 18:55:20 2022 +0530 + + add options to collect all network policy + + Signed-off-by: Ram + +commit fc773c955dd296224b1c91a6c4d0193a36fc6131 +Author: Rohit-PX +Date: Wed Jun 29 00:23:44 2022 +0000 + + Add namespace to context so that restore gets created in the right namespace + + Signed-off-by: Rohit-PX + +commit 2b41fb3302fac2987e1033aba84d13ddba25086d +Author: Lalatendu Das +Date: Mon Jun 27 16:42:02 2022 +0000 + + pb-2867: fix restore path issue for CSI backups + + - fix the variable scope related issue in the restore logic of CSI backup.
+ - changed the switch case to have right type assertion check + + Signed-off-by: Lalatendu Das + +commit 567bac837623c1cf6d4c2384b68a909519e8f382 +Author: Lalatendu Das +Date: Fri Apr 8 16:01:50 2022 +0000 + + pb-2037: Making snapshot timeout configurable + + Added a configMap to change the local-snapshot timeout period. + This helps in addressing certain version of OCS cluster issues + wherein it takes genuinely more time + + Signed-off-by: Lalatendu Das + +commit 7c76e059bbd14a00086fcafa5d7f0a0e3ef64a72 +Author: Ozgur Gul +Date: Mon Jun 20 12:24:55 2022 +0100 + + Removed apos + +commit 03127f2704d19dbeb8cfaf71159054f54c97a282 +Author: Lalatendu Das +Date: Wed Jun 15 16:23:06 2022 +0000 + + pb-2836: Fixing some typecasting error related to v1beta1 volumesnapshot + + Wrong typecasting causes crash while duplicating backup and executing + other backup operations. Fixed by changing to correct typecasting. + + Signed-off-by: Lalatendu Das + +commit e1af2420fa3c94c43df760073ee4870b1fadbb7b +Author: Lalatendu Das +Date: Mon Jun 13 04:11:02 2022 +0000 + + pb-2328: vendored kdmp changes + + Vendored latest kdmp for supporting v1 & v1beta1 volumesnapshot CRD + + Signed-off-by: Lalatendu Das + +commit 78361560475e6722100b36d1c319f28154c00a0b +Author: Lalatendu Das +Date: Mon Jun 6 10:30:07 2022 +0000 + + pb-2328: Support both v1 & v1beta1 version of volumeSnapshot + + - Added support for V1 VolumeSnapshot APIs which is GA from + kubernetes 1.20 version. + + Signed-off-by: Lalatendu Das + +commit 7c9d3c0c2d63b57e606841ea0d3a2b86be227a7e (origin/master_sched_restore) +Author: Aditya Dani +Date: Tue Jun 7 19:16:00 2022 +0000 + + PWX-24000: Treat pure backend block volumes as Portworx volumes. + +commit 2836033676a1a6a205e495abffa0cf146342e37d +Author: Jindrich Zak +Date: Wed May 25 13:38:09 2022 +0200 + + Extract populating podNames. + +commit 879e0d32d1302a72c177cb859826eaefe74e2aff +Author: Jindrich Zak +Date: Wed May 25 13:06:37 2022 +0200 + + Improve variable names and comments. + +commit b83100db2f7204d12aee27c7830cfb8a959f4692 +Author: Jindrich Zak +Date: Mon May 23 17:05:14 2022 +0200 + + DS-2051: Add selector and namespace args. + +commit d38953726d0051f71e25af910357b9279e3b3dd3 +Author: Ram +Date: Tue Jun 7 21:46:08 2022 +0530 + + PWX-24245: Register VM object with suspendOption + + Signed-off-by: Ram + +commit 17183ee239b22f49faa79e67f076f66e7780bec1 +Author: Ram +Date: Mon Jun 6 22:57:06 2022 +0530 + + Do not list all k8s resources for volumeonly migration + + Signed-off-by: Ram + +commit dbe85f5ebd61b81ea9e7525590d5696afb619966 +Author: Luke Pitstick +Date: Fri Jun 3 10:26:32 2022 -0600 + + PWX-24190 Fix elapsed volume migration time when completed + +commit 19a108754d893579aa5fbadc8cabf23fef3a8532 (origin/siva-hack) +Author: ram-infrac <35250819+ram-infrac@users.noreply.github.com> +Date: Wed May 25 09:48:53 2022 +0530 + + Revert "support for k8s 1.22 kube-scheduler" + + This reverts commit e894216546860d5045d61a9de6e6bbcc7f1907c8. + +commit be647da3bb6dd51030be83ffcc6fa01f67254013 +Author: ram-infrac <35250819+ram-infrac@users.noreply.github.com> +Date: Wed May 25 09:48:53 2022 +0530 + + Revert "add namespace resource permission for stork-scheduler clusterrole" + + This reverts commit 506827a08711999512d473702116884e93a00a94. 
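A small Go sketch of the version gate behind the pb-2328 change above; the v1 VolumeSnapshot API is GA from Kubernetes 1.20, so v1beta1 is only needed on older clusters (fetching major/minor from the discovery client is assumed):

package sketch

// snapshotAPIGroupVersion picks which VolumeSnapshot API group/version to
// talk to based on the cluster's version.
func snapshotAPIGroupVersion(major, minor int) string {
	if major > 1 || (major == 1 && minor >= 20) {
		return "snapshot.storage.k8s.io/v1"
	}
	return "snapshot.storage.k8s.io/v1beta1"
}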
+ +commit 2ee1892bf48a8d67324799fe58d68dc0c66507ac +Author: sivakumar subraani +Date: Mon May 23 10:03:24 2022 +0000 + + pb-2375: Fixed issue in handling the return value of GetObjectLockConfiguration call from FB and Dell ECS objectstore, when bucket is not enabled with lock + +commit 909dd2bfec71148ec982a3900a19682bc1d6f8e0 (origin/pb-2377) +Author: Ram +Date: Mon May 9 18:51:20 2022 +0530 + + PWX-23656 Support for removing field during activate/deactivate migration + + Signed-off-by: Ram + +commit 9f9672e04a290190cc499ee618740dbb4bb2a845 +Author: Ram +Date: Mon May 9 18:49:27 2022 +0530 + + PWX-23579: parallelize application activation/deactivation + + Signed-off-by: Ram + +commit 59d86634e3ede548e63a15d4f1118c6f3a6000c5 +Author: Serhii Aheienko +Date: Fri May 13 10:02:31 2022 +0300 + + cmdexecutor: use a unique wait.sh script name per command + +commit 6dfb6c8797454cb24b38ccfa00c05e3813c5e579 +Author: root +Date: Thu May 12 05:20:15 2022 +0000 + + Update vendor + +commit 930954abd922ef4927bf18f2f0d9a670358dc313 +Author: Aditya Dani +Date: Wed May 11 22:11:43 2022 -0700 + + PWX-22627: Add support for IAM role in BackupLocation + +commit ff41727c85c3b116cfa8c771c534694c2242318b +Author: Andrei Kvapil +Date: Thu May 12 22:15:57 2022 +0200 + + linstor: support for WaitingForFirstConsumer + +commit b0d15e0142aaf54801914df04545c67ef61994fb +Author: Luke Pitstick +Date: Thu May 12 11:11:33 2022 -0600 + + PR feedback and testing + +commit 5cfeb4a07ad890495393e55eba8171ee2e796095 +Author: Luke Pitstick +Date: Wed May 11 15:40:01 2022 -0600 + + PWX-23693 Allow scheduling of pods with pending pvcs due to WaitForFirstConsumer + +commit c5f1df8c0627b8d145811d3d625ca38183320691 +Author: Ram +Date: Wed May 11 17:23:59 2022 +0530 + + pwx-23582: VM object migration support + + pwx-23657: link datavolume and pvc object during migration + pwx-23658: avoid ownerref resources for vm object + + Signed-off-by: Ram + +commit 5e8179bd1548d4ff7309270b98fe36c51ba4cc0d +Author: Aditya Dani +Date: Wed May 11 11:34:59 2022 -0700 + + PWX-23703: Explicitly pass migration taskID to get the CloudMigrateStatus for Portworx driver + +commit 506827a08711999512d473702116884e93a00a94 +Author: Ram Suradkar +Date: Mon Nov 29 14:22:37 2021 +0000 + + add namespace resource permission for stork-scheduler clusterrole + + Signed-off-by: Ram Suradkar + +commit e894216546860d5045d61a9de6e6bbcc7f1907c8 +Author: Ram Suradkar +Date: Fri Nov 26 10:53:23 2021 +0000 + + support for k8s 1.22 kube-scheduler + + Signed-off-by: Ram Suradkar + +commit 3fce175ced10464c5bea9336ad0b258322cfd7c5 +Author: Rohit-PX +Date: Tue May 3 07:43:25 2022 +0000 + + Skip deleted namespaces from a migration schedule + + Signed-off-by: Rohit-PX + +commit 3cce9702fa805ef6d3f0f910d91fd2cc20ba8a8c +Author: sivakumar subraani +Date: Wed May 4 08:00:39 2022 +0000 + + pb-2081: Added retry logic to wait for the volumesnapshot status update, before accessing restoreSize. + +commit fdce2656c89e85bbc99bd7228fa65bc1bc270fb6 +Author: Lalatendu Das +Date: Tue May 3 10:15:49 2022 +0000 + + pb-2371: Handle non-existent bucket scenario for object-lock + + - Handle error code "NoSuchBucket" while fetching object lock info for an + AWS S3 based bucket. + - Fixed go version to eliminate travis build failure. + + Signed-off-by: Lalatendu Das + +commit 80663f52424749aeb42d8511bbabd2ff95263bcc +Author: sivakumar subramani +Date: Mon May 2 10:42:07 2022 +0000 + + pb-2081: In kdmp case, resetting the Datasource field of pvc for restore.
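A Go sketch of the object-lock error handling that pb-2371 above (and pb-2903 earlier) describe: a missing bucket or a missing lock configuration is treated as "no lock" rather than a hard failure. The exact not-found code string varies by objectstore, so both variants are checked:

package sketch

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
)

// objLockEnabled reports whether the bucket has object lock enabled,
// mapping the not-found error codes to (false, nil).
func objLockEnabled(client *s3.S3, bucket string) (bool, error) {
	out, err := client.GetObjectLockConfiguration(&s3.GetObjectLockConfigurationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case s3.ErrCodeNoSuchBucket, // pb-2371
				"ObjectLockConfigurationNotFoundError", // AWS
				"ObjectLockConfigurationNotFound": // some S3-compatible stores, per pb-2903
				return false, nil
			}
		}
		return false, err
	}
	return out.ObjectLockConfiguration != nil &&
		aws.StringValue(out.ObjectLockConfiguration.ObjectLockEnabled) == s3.ObjectLockEnabledEnabled, nil
}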
+ +commit fc9a311052adbb9f0b4c003cd53113fd04725835 +Author: Ram +Date: Wed Apr 27 19:02:05 2022 +0530 + + PWX-23581: print stork leader details in logs + + Signed-off-by: Ram + +commit 4fc84928a3670580addc5286a0d9014ca53bacdc +Author: Ram +Date: Wed Apr 27 21:59:38 2022 +0530 + + [portworx] update delete api calls with context + + Signed-off-by: Ram + +commit 37724f3f4b33c6a97dd7e9c71e5c4d982c03d9d7 +Author: Ram +Date: Wed Apr 27 21:58:50 2022 +0530 + + openstorage vendor updates + + Signed-off-by: Ram + +commit 46e4721a492f3a1a452d3debf9ed1c8927272d74 +Author: diptianjan +Date: Thu Apr 28 17:14:55 2022 +0530 + + PB-2365: Use pvcUID instead of name while creating dataexport CR during generic backup cleanup. + +commit 4a7ba30a06ef04bde1cf7fe7b1e72b8b358d2cac +Author: sivakumar subramani +Date: Tue Apr 26 04:48:40 2022 -0400 + + pb-2360: Using GetStorkPodNamespace api for cmdexecutor image extraction as well. + + - This takes care of the use case even if stork is deployed in a non kube-system namespace + +commit e9c353b483189926a742b1b44a1021512522b7c1 +Author: sivakumar subramani +Date: Tue Apr 26 01:10:22 2022 -0400 + + vendoring latest kdmp changes from master branch + +commit 74d55834aed7dc76e43572351fa3da3059e0c778 +Author: sivakumar subramani +Date: Sun Apr 24 04:50:48 2022 -0400 + + pb-2330: vendoring latest kdmp repo from master branch + +commit d535c7ad80183298d903720b4c1d36f4881b65cd +Author: sivakumar subramani +Date: Sat Apr 23 17:48:03 2022 -0400 + + pb-2330: remove the portworx repo name from default defaultCmdExecutorImage to support custom repo + +commit 67a73e91b0b4ae888a75c379bd1f4e9951b7413b +Author: sivakumar subramani +Date: Sat Apr 23 01:17:49 2022 -0400 + + pb-2330: Made fix to support image name to have custom repo as well for cmdexecutor image + +commit da41de4d9b056272fc0e45d64708c7435ae31493 +Author: Lalatendu Das +Date: Thu Apr 21 13:45:57 2022 +0000 + + pb-2324: CSIDriver V1 version API needed for k8s 1.22 and beyond + + CSIDriver V1beta1 API support is removed from k8s 1.22. + This caused certain DS to be initialized to nil and caused a crash for CSI based + backup. Added an adequate check and called the appropriate APIs of the CSI driver. + + Signed-off-by: Lalatendu Das + +commit 5b345b98e8eee3aebde6e8c3050a5d726f81c93b +Author: Rohit-PX +Date: Fri Apr 15 13:18:16 2022 -0700 + + Explicitly pass token to generate cluster pair method + + Signed-off-by: Rohit-PX + +commit bb2f19719160564e8d44114cad8d4b10027dd106 +Author: Lalatendu Das +Date: Thu Apr 21 04:56:29 2022 +0000 + + pb-2298: minimum retention period added to error msg of failed backup + + When a scheduled backup fails due to an insufficient retention period, the + user needs to be made aware, via the error message, of the minimum retention + period to be set. + + Signed-off-by: Lalatendu Das + +commit 192d0a46a62a52b0c1b9b71efe507bc48f57c0a9 +Author: Lalatendu Das +Date: Wed Apr 20 17:11:56 2022 +0000 + + pb-2325: Set retention period for object-locked Failed backup + + The failed scheduled backup created for an object-locked bucket + needs its retention period set appropriately. This helps px-backup + delete it when the auto-delete flag is enabled in px-backup. + + Signed-off-by: Lalatendu Das + +commit 4e0ceaba2bc7b0404a18203f51247a802ad4f0c7 +Author: sivakumar subramani +Date: Wed Apr 20 04:13:05 2022 -0400 + + pb-2323: vendoring latest kdmp repo from master branch.
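A minimal Go sketch of the custom-repo handling behind pb-2330 above: split a fully qualified executor image into its registry/repository prefix and the bare image name (the function name here is illustrative, not the actual kdmp helper):

package sketch

import "strings"

// splitImageRegistry splits an image such as
// "myregistry.io/dir1/dir2/cmdexecutor:tag" into "myregistry.io/dir1/dir2"
// and "cmdexecutor:tag", tolerating registries with extra directories.
func splitImageRegistry(image string) (registry, name string) {
	idx := strings.LastIndex(image, "/")
	if idx < 0 {
		return "", image // bare image, no registry prefix
	}
	return image[:idx], image[idx+1:]
}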
+ +commit 7f25dd83e0ff21878cc6af64d921c92256e8fd1d +Author: sivakumar subramani +Date: Sun Apr 17 04:35:06 2022 -0400 + + pb-2292: vendor kdmp repo from master branch + +commit 22745ed2d3f0e64f91cc75b98b693ca57cead250 +Author: sivakumar subramani +Date: Fri Apr 15 13:21:46 2022 -0400 + + pb-2293: add logic to extract a registry that contains extra directories + within it for the rule cmd executor. + +commit 3c18fd21ed95d6fac6840459b45c64bc799fadbe +Author: Kesavan Thiruvenkadasamy +Date: Thu Apr 14 13:52:51 2022 +0530 + + Modify to use helper method from component helpers package + + Signed-off-by: Kesavan Thiruvenkadasamy + +commit 94aaa8e65871f3dd01d6098797522f61da796ab0 +Author: Kesavan Thiruvenkadasamy +Date: Thu Apr 14 13:52:11 2022 +0530 + + Fix tests due to change in GetClusterPairingInfo parameters + + Signed-off-by: Kesavan Thiruvenkadasamy + +commit b74d6b851d331c4b4c3d5694fe887bc8a41ee30b +Author: Kesavan Thiruvenkadasamy +Date: Thu Apr 14 12:53:22 2022 +0530 + + Update vendor files + + Signed-off-by: Kesavan Thiruvenkadasamy + +commit a10cbd1a3b0eb50e9c3f5427e5f7cf9a07374439 +Author: Kesavan Thiruvenkadasamy +Date: Thu Apr 14 10:36:37 2022 +0530 + + Update go.mod to support kdmp vendoring changes + + Signed-off-by: Kesavan Thiruvenkadasamy + +commit 20b65fd654eb7e9292ca7b901948a8b2053a8161 +Author: Luke Pitstick +Date: Tue Apr 12 14:48:51 2022 -0600 + + PWX-22676 wait for extender to be ready + +commit 314500a3005a8ee8049b1a41e9cd252ac72d4c72 +Author: sivakumar subramani +Date: Mon Apr 11 17:47:33 2022 -0400 + + pb-2288: ebsVolume.Size is size in GiB, so converting it to bytes. + +commit e37c9a9167c803d8ab58516a3beb11f8711c8b9d +Author: diptiranjanpx +Date: Thu Mar 31 10:13:16 2022 +0000 + + [PB-2270]: Initializing cred values from backuplocation only if cluster secret is provided. + +commit 7cc7d37aee9ff9ab8ecf41dd738382e60246c8d4 +Author: sivakumar subramani +Date: Thu Mar 31 16:43:53 2022 -0400 + + pb-2266: Adding backupType in the applicationbackup CR created by + schedule backups. + + - Added fix in Makefile to address the staticcheck failure. + +commit 239ea6d7b67b7639c79068f8081827798f219ef1 +Author: Lalatendu Das +Date: Tue Mar 29 11:16:53 2022 +0000 + + pb-2267: fixed issue in object lock configmap name + + Changed the name of the object lock configMap from stork-objLock-config to + stork-objlock-config. This is to avoid any capital letter in it. + + Signed-off-by: Lalatendu Das + +commit 856c442b010aa198927172c5b1db598d93945722 +Author: Ram Suradkar +Date: Tue Mar 29 06:54:05 2022 +0000 + + Upgrade openssl libs + + Signed-off-by: Ram Suradkar + +commit 55772e1191e22bfdaf786e8df3119b7d2c06b32b +Author: diptianjan +Date: Wed Mar 16 23:04:40 2022 +0530 + + [PB-2194]: backuplocation object to have credential info associated with the cluster. + +commit f2a6dae7ef2862ebe1a12aa9ba50bc0baf134a31 +Author: Lalatendu Das +Date: Sun Mar 27 20:35:09 2022 +0000 + + pb-2259: Fail the schedule-backup for an invalid retention period + + - Created a configMap related to object lock which can be used for altering + the incremental count of schedule backups.
+ - Implemented the following logic: if the retention period is altered in + between two scheduled backups, then the backup will fail for an invalid + retention period + + Signed-off-by: Lalatendu Das + +commit 6f84565113d2f75bb868e6b13a250d15a2774972 +Author: sivakumar subramani +Date: Wed Mar 23 15:43:16 2022 -0400 + + pb-2260: Return default objLockInfo struct instead of error for gke and azure bucket in GetObjLockInfo + +commit 6d25cd654c5f4e4308e885fb87b343418146fbf5 +Author: Ram +Date: Mon Mar 21 19:59:50 2022 +0530 + + Check empty RemotepairID before deleting clusterpair + + Signed-off-by: Ram + +commit 8b23fb4c6900f6d5a8ea7ee0974ecae1a634efa3 +Author: Ram +Date: Wed Mar 16 22:42:56 2022 +0530 + + PWX-23244: Allow pods using FA/FB pvcs in scheduling decisions + + Signed-off-by: Ram + +commit 4dc0126f48f84b0b06267bf2b25e6769eca26650 +Author: sivakumar subramani +Date: Sun Mar 20 13:55:22 2022 -0400 + + pb-2236: Added logic to force full backup for locked period backup from schedulebackup + + - At the start of every day's time slot, force the first backup of the + schedulebackup to always be a full backup. + - This takes care of the case where an incremental backup overlaps two + days' time slots, and avoids having an incremental backup as the first + backup at the start of a day. + +commit 035c2a6f1f1c212e8d5b849d73fc8c7eae42aaa8 +Author: sivakumar subramani +Date: Thu Mar 17 14:02:28 2022 -0400 + + pb-2241: fixed a nil pointer access in GetObjLockInfo function. + +commit 12a3debbfa7fdaa597e2f41959e8d6f81e854118 +Author: Luke Pitstick +Date: Tue Mar 15 02:04:11 2022 -0600 + + PWX-23212 Treat StorageDown nodes as degraded instead of Online + +commit 0f598651a156e0cc45d0e42ea045af73d7f377ac +Author: Ram +Date: Wed Feb 16 22:36:25 2022 +0530 + + Upgrade torpedo scheduler calls + + Signed-off-by: Ram + +commit aa8760e4ab2bebe5dd4f6dd05e7731259deb42a7 +Author: Ram +Date: Wed Feb 16 22:34:42 2022 +0530 + + Update px vendor libs + + - torpedo - master + - openstorage - master + + Signed-off-by: Ram + +commit a9c84cc30c0026ce126ce478858c2f2e2190431f +Author: Ram +Date: Tue Mar 15 12:29:12 2022 +0530 + + PWX-23231: Ignore completed migration while checking inactive cluster domain + + Signed-off-by: Ram + +commit 0413d4fc78d4c2a0469c6cc15cbef5b481ec3dc5 +Author: Ram +Date: Fri Mar 11 00:12:16 2022 +0530 + + PWX-23026: Check if underlying storage pair is used by another clusterpair + + Hold deletion of clusterpair if another clusterpair exists which is using + same storagepair + + Signed-off-by: Ram + +commit 64e7ec07c1c7444155f40291d8634c500fd215b4 +Author: sivakumar subramani +Date: Sun Mar 13 01:41:38 2022 -0500 + + pb-2179: Added changes required for supporting object lock for backupschedule deletion. + + - Adding the "object-lock-retention-period" annotation to the + applicationbackup CR created by applicationbackupschedule in stork + - This retention period annotation value will be used in the backupsync + logic to update the retention period of the synced backup object. + - Added GetObjLockInfo in stork objectstore as it needs to be called in + stork as well as px-backup + +commit 7cb7774a5b7eb3d10b2f9ad64528465a660ad222 +Author: Aditya Dani +Date: Sat Mar 12 02:20:28 2022 -0800 + + PWX-23215: Set the elapsed time in MigrationSummary only when required. + + - Only if resources or volumes are getting migrated are their + respective elapsed times set.
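A Go sketch of the day-boundary rule pb-2236 above describes, under the assumption that trigger times are compared in the schedule's local time zone:

package sketch

import "time"

// isFirstBackupOfDay reports whether the next scheduled trigger lands on a
// new calendar day relative to the last one; if so, the backup is forced
// to be full, so an incremental chain never straddles a day's time slot.
func isFirstBackupOfDay(lastTrigger, nextTrigger time.Time) bool {
	y1, m1, d1 := lastTrigger.Date()
	y2, m2, d2 := nextTrigger.Date()
	return y1 != y2 || m1 != m2 || d1 != d2
}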
+ +commit a2172901c210d8fce60381b485b256788f72a173 +Author: Aditya Dani +Date: Sat Mar 12 02:05:46 2022 -0800 + + PWX-23317: Raise an event if clusterpair not found in Migration + + Signed-off-by: Aditya Dani + +commit b9e921fcf993b440b61034019a86ad79b661f27b +Author: sivakumar subramani +Date: Thu Mar 10 00:25:33 2022 -0500 + + pb-2219: Add changes to use the registry name and image secret for kopia + executor from stork deployment spec. + + - Add GetStorkPodNamespace to get the stork pod namespace, even + if it is not installed in the kube-system. + +commit 880ec6d853bc095265ade4391e1bdcc5369f14d2 +Author: sivakumar subramani +Date: Wed Mar 9 23:23:49 2022 -0500 + + pb-2219: vendor changes for latest sched-ops + +commit 7c320e7123d4b7540b96ac0311756666f0acc315 +Author: sivakumar subramani +Date: Wed Mar 9 10:01:08 2022 -0500 + + pb-2219: vendoring latest kdmp changes + +commit 3dc2fcb4f165e2c03917d76099aef979219ad88d +Author: Ram +Date: Fri Mar 4 16:48:21 2022 +0530 + + allow running the complete integration suite + + Signed-off-by: Ram + +commit 5670d51b3f82a8c9f2de0af8a3acce2be6f41cef +Author: Luke Pitstick +Date: Wed Mar 2 10:28:19 2022 -0700 + + PD-1108 update app spec example + +commit 36fbc08b8080b8394cf42d6d6350a5e8bf18dd14 (origin/pb-2196) +Author: sivakumar subramani +Date: Sun Feb 27 10:17:31 2022 -0500 + + pb-2172: Added fixes related to handling of the rule command executor image. + + - Added CMD-EXECUTOR-IMAGE-REGISTRY and CMD-EXECUTOR-IMAGE-REGISTRY-SECRET environment variables + to get the custom registry name and secret for the rule command executor pod image. + - Added the annotation to get the image registry secret value, + if someone passes the custom image registry value as an annotation in the rule CR. + - Added logic to take the image registry name and secret value + from the stork deployment if both the env and the rule annotation are missing. + - Also, add a fix to fail the backup and delete the rule command + executor pod, if the rule command executor pod is stuck in the Pending phase for more + than five minutes. Sometimes it gets stuck in the Pending state, + if there is any issue with the image registry or secret value. + - Minor fix in printing an err in handle function. + +commit 7ad5271f34accb5fd7e06c5a3232cd2d84296a0c +Author: Ram +Date: Mon Feb 21 22:35:17 2022 +0530 + + [integration-test] increase nodeoffline timeout to avoid race + + Signed-off-by: Ram + +commit 5651b078a3bb7d6d76461d2d4bf5306cc6ac2839 +Author: Ram +Date: Mon Feb 21 22:34:26 2022 +0530 + + PWX-22971: application clone failing at resource stage + + - get volume driverName by looking at pv object + in case of non backup/restore prepareresource calls + + Signed-off-by: Ram + +commit 021fc3c60b10c3600d39ee2401d616e246741deb +Author: Lalatendu Das +Date: Thu Feb 24 03:03:33 2022 +0000 + + pb-2178: Obtain object-lock info for S3 bucket + + Added a function to the objectstore package to extract object lock info from + an S3 bucket.
+ + Signed-off-by: Lalatendu Das + +commit 950cc24775fd742ea5decc2952d87868103b74da +Author: Andrei Kvapil +Date: Tue Feb 8 00:54:39 2022 +0100 + + Format log for linstor driver + + Signed-off-by: Andrei Kvapil + +commit 6479dc4086383dea472f00532ec2574db38d11f1 +Author: Andrei Kvapil +Date: Wed Feb 2 00:44:05 2022 +0100 + + Update golinstor module + + Signed-off-by: Andrei Kvapil + +commit ecb55920e63ede800b4af9492e3e7b76e7921a08 +Author: Ram +Date: Thu Feb 17 11:29:29 2022 +0530 + + pwx-22958: fix failing integration tests + + pvcResizeTest - wait for next migration schedule to trigger + driverNodeTest - Adjust pods reschedule time with updated health monitor timeout + pvcOwnershipTest - use correct storage-provisioner annotation + + Signed-off-by: Ram + +commit b9773cd69de90aa418a6d1666ce43ca3285ab92c +Author: Rohit-PX +Date: Sun Feb 13 09:35:54 2022 -0800 + + Add RabbitMQ operator migration test + + Signed-off-by: Rohit-PX + +commit e3077b75414649fbd329988a205599bc4b48a96a +Author: Ram +Date: Mon Feb 7 18:18:03 2022 +0530 + + Support for activate/deactivate nested server with CR + + Signed-off-by: Ram + +commit 97078febb7be8bf2975223af55c0f782f98d0e0b +Author: Ram +Date: Mon Feb 7 18:17:38 2022 +0530 + + Allow multiple disable path options for appreg + + Signed-off-by: Ram + +commit f6567f866498e703b4b3f77c9528c0383e1f8b6b +Author: Aditya Dani +Date: Mon Feb 14 10:17:10 2022 -0800 + + STOR-479: Add separate elapsed times for Volumes and Resources. + +commit ecbf6806f8495a73f6e570f666996318c565dac5 +Author: Aditya Dani +Date: Fri Jan 28 16:04:52 2022 -0800 + + STOR-479: Add MigrationSummary to Migration CR. + + Stork + - MigrationSummary provides a short summary on what objects got migrated as a part + of a migration cycle. It includes the following: + - total number of k8s resources expected to be migrated + - actual number of k8s resources successfully migrated + - total number of volumes expected to be migrated + - actual number of volumes successfully migrated + - total number of bytes transferred + - Portworx driver will report the total number of bytes transferred for each volume. + - The Migration summary will provide a sum of the total bytes transferred. + + Storkctl + - Add total bytes transferred column to get migrations output + - Modify UTs + + Integration Tests + - Validate Migration summary is updated in the TestMigrationFailoverFailback test. + +commit 93d78dd8cce75f056d1731d1060f160664daea56 +Author: Aditya Dani +Date: Fri Feb 11 15:30:38 2022 -0800 + + PWX-22606: Give low scores to nodes which are in Degraded state. + + - A node in Degraded state will return half of the score it would + originally return based on the node/zone/rack/region priority. + - If a volume has a replica on a node but the node is in Degraded state + stork will give it a score of rackPriority / 2. This is done since + a pod running on that node is not really hyperconverged and should be + scored similar to any other node in the same rack. + - Added UTs for scenarios where a node needs to be scored based on + zone/region/node/rack but is in Degraded state.
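The PWX-22606 scoring rule above, reduced to a Go sketch; the priority values themselves are stork internals and assumed here:

package sketch

// nodeScore halves the score a Degraded node would otherwise earn from its
// node/zone/rack/region priority, nudging pods toward healthy nodes
// without hard-blocking the degraded ones.
func nodeScore(priority int, degraded bool) int {
	if degraded {
		return priority / 2
	}
	return priority
}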
+ +commit c96b617b194f8b39ef53c78d7d4a0d2844de84bb +Author: Ram +Date: Mon Feb 14 13:32:13 2022 +0530 + + Update sched-op apis update changes + + Signed-off-by: Ram + +commit 25f03b2f96683c02ea3cd7e8e089fc21d77d7d81 +Author: Ram +Date: Mon Feb 14 13:31:46 2022 +0530 + + Update sched-ops vendor to master + + Signed-off-by: Ram + +commit 7565de60dc1fbd5b538520f52b7ed83fa55d49eb +Author: Ram +Date: Wed Feb 9 20:52:54 2022 +0530 + + Add autosuspend option to migrationschedule object + + - auto-suspend will allow to stop migration on src cluster + if remote cluster has activated migrated application + - fix review comments + + Signed-off-by: Ram + +commit b513b6b7e34cfde918dc5ae445cc0d4c38644def +Author: Ram +Date: Wed Feb 2 22:33:45 2022 +0530 + + integration test for auto-suspend migrationschedule + + Signed-off-by: Ram + +commit b26bf18c95b489ae40676b4e63c2ffca8cc191f3 +Author: Ram +Date: Thu Nov 11 22:32:02 2021 +0530 + + add migrationschedule name annotation to migration objects + + - Dont scale down apps automatically on primary cluster + - Check and create namespace on DR cluster for migrationschedule + + Signed-off-by: Ram + +commit b8161121037cfa530e049bec847b05634c742310 +Author: Ram +Date: Thu Feb 18 10:52:59 2021 +0530 + + Disable migration if dr site apps are active + + - stor340: Redo cluster-pairing automatically + - storkctl deactivate will also set migrationschedule appactivate field + - vendor update sched-ops + + Signed-off-by: Ram + +commit 1b6fbe92dbdb86af46d4f29a1913122647db586a +Author: Rohit-PX +Date: Fri Feb 11 03:16:53 2022 -0800 + + Torpedo vendor changes for CRD support + + Signed-off-by: Rohit-PX + +commit 44fe3c71f1c9cd6fdd2d0e54e8921c6dccdffe5f +Author: Aditya Dani +Date: Tue Jan 25 17:00:15 2022 -0800 + + STOR-484: Increase the health monitor offline node timeout to ~4 minutes. + + - The health monitor in STORK currently waits only for a minute after it detects + storage driver node as offline. Changing this timeout to ~2 minutes. Stork will + continue to poll for offline nodes every 2 minutes. So the max time a pod + running on an offline node to get deleted will be 4 minutes. + - A storage driver if restarting due to an upgrade can take upto 3 minutes and + STORK could cause an unnecessary restart to the application pods. 
+ +commit 32685418fb8895c0fdfd930f3a7b22c7bc08cac0 +Author: Luke Pitstick +Date: Wed Feb 9 08:38:57 2022 -0700 + + PWX-22607 Add annotation to disable hyperconvergence prioritizing + + PWX-22609 Change webhook controller default to true + +commit c87fbac8704ba86c2b0b4b3a66ca72bdb1ce60b0 +Author: Rohit-PX +Date: Wed Feb 9 08:17:24 2022 -0800 + + Replace sysbench with fio + + Signed-off-by: Rohit-PX + +commit 27f15651b86df1fae49b7d38c255d372d36c3658 +Author: Ram +Date: Thu Feb 10 10:55:38 2022 +0530 + + Don't upload storkctl binaries to s3 bucket + + - Users can copy storkctl binary from stork pod itself + + Signed-off-by: Ram + +commit a91c1d9f36773830f0e5b5ec4ee4e36ece2fa633 +Author: Ram +Date: Mon Feb 7 18:52:53 2022 +0530 + + Update PerconaXtraDBCluster suspend option to gracefully shutdown cluster + + Signed-off-by: Ram + +commit 610beced0c6f61bb2621b63bf4172156b593c961 +Author: Ram +Date: Tue Feb 8 12:35:11 2022 +0530 + + Mark snapshot status as failed if manually deleted by user + + Signed-off-by: Ram + +commit be77c700bab9910edefe9f3495d65f7a99374df0 +Author: Ram +Date: Mon Feb 7 23:23:00 2022 +0530 + + Always sync volumesnapshotschedule and volumesnapshot status + + Signed-off-by: Ram + +commit 5911c1dfd7d2726701cd87f0cad3fda9c4ca91da +Author: Aditya Dani +Date: Fri Jan 21 11:19:53 2022 -0800 + + STOR-577: Portworx Driver: Do an explicit not found check in DeletePair + + - Change the error condition to do a string check on "not found". + +commit 58e441bdcdb6bef965754ea09e8e6c786c8594a2 +Author: Rohit-PX +Date: Tue Feb 1 05:52:17 2022 -0800 + + Separate out migration and snapshot tests and run all tests + + Signed-off-by: Rohit-PX + +commit aa43b560025eddd304b7451b83b6f92edafa81b5 +Author: Rohit-PX +Date: Tue Feb 1 06:56:02 2022 -0800 + + Migration test for mongo operator + + Signed-off-by: Rohit-PX + +commit 765e8957c86b475590e59dfa5e1f3fed8171d2df +Author: Ram +Date: Fri Jan 28 17:08:20 2022 +0530 + + integration test to validate pvc resize after migration + + Signed-off-by: Ram Suradkar + +commit cd44606a7ae9ffd35f09fd35e705849237b2f9e0 +Author: Ram +Date: Tue Jan 25 17:19:33 2022 +0530 + + keep storage class in pvc spec for migrated pvcs + + Signed-off-by: Ram Suradkar + +commit daf8f93a88ba71fbf05f65bc1273fff604124220 +Author: Luke Pitstick +Date: Tue Feb 1 15:59:11 2022 -0700 + + PR Feedback + +commit 1f7daadb5770bcab336d6531216b3417c14724f9 +Author: Luke Pitstick +Date: Fri Jan 28 09:59:31 2022 -0700 + + Refactor + +commit e2f0a0626cfa75eebc60691186bd013e0f77a74a +Author: Luke Pitstick +Date: Wed Jan 26 10:36:53 2022 -0700 + + Scramble object order + +commit 8fc1f3b8777a3da4f999ad42af635e2a2e757fa8 +Author: Luke Pitstick +Date: Wed Jan 12 11:27:22 2022 -0700 + + STOR-573 apply updatedObjects in parallel during migration + +commit 0928646b2c4cc578af379e7c307cce7180a600a8 +Author: Ram +Date: Tue Jan 25 17:49:39 2022 +0530 + + Fix CVEs reported by dependabot + + Signed-off-by: Ram + +commit 998683f66f18eba1281ad99a9e370d5dc2f163c3 +Author: Rohit-PX +Date: Fri Jan 21 03:49:54 2022 -0800 + + Add support for openstorage.portworx.io as jwt issuer + + Signed-off-by: Rohit-PX + +commit 46d20f921bfed040ec1a77395b80f8dc6de4deac +Author: sivakumar subramani +Date: Mon Jan 17 03:02:15 2022 -0500 + + pb-2162: vendor changes for the kdmp master branch + +commit 0fb31b7231a46cfafaf3b0e274b6b2cdd2278f5b +Author: sivakumar subramani +Date: Mon Jan 17 00:11:35 2022 -0500 + + pb-2162: Added fixes for backup/restore on ocp environment. 
+ - Including system:openshift:scc rolebinding in the backup + - Added privileged scc for job role. + +commit 532949ca566da7252178d1d64a157e9990cc6513 +Author: diptianjan +Date: Mon Jan 17 09:29:40 2022 +0530 + + Vendoring in from latest kdmp. + +commit af871c01650a1346f6fad5e5a709581513e6069b +Author: diptianjan +Date: Sat Jan 15 09:36:27 2022 +0530 + + Addressed the review comments. + +commit 97b8e4a58046075759f4894ad5853e2d5fd133f0 +Author: diptianjan +Date: Wed Jan 12 12:38:25 2022 +0530 + + [PB-2148]: Implementing job framework to make the pvc bound in the waitforfirstconsumer case. + +commit 6a8c69b4c08ffc2607715a955ecbddadcbf74e08 +Author: Rohit-PX +Date: Tue Jan 4 07:31:23 2022 -0800 + + Integration test to delete stork pods on destination during migration. + + Signed-off-by: Rohit-PX + +commit 8ac87e611313c709628b9b84581b50302874e624 +Author: Prashanth Kumar +Date: Thu Jan 13 00:46:52 2022 -0500 + + Skipping zone checks for EFS provisioner for generic backup/restore paths + +commit a9f23af1c447c143dd2deeb09f291e3bd1cdf58f +Author: sivakumar subramani +Date: Wed Jan 12 14:29:27 2022 -0500 + + pb-2157: retaining the rolebinding subjects if the namespace is not set during restore. + +commit 40a08b489d40baa6c205f91af32ce3dc8932ee28 +Author: sivakumar subramani +Date: Mon Jan 10 02:30:20 2022 -0500 + + pb-2113: vendor change for kdmp from master branch. + +commit 6f415f0c02cf5264d7f643b28eb191219a5ad7f9 +Author: Prashanth Kumar +Date: Thu Jan 6 00:40:13 2022 -0500 + + Multi zone support for aws/gke + - As part of kdmp restore, it is made sure that the kdmp job pod + and the restore PV's are created in the same zone + +commit 26cf2a3f08e89f27af4aebe3d9371a2638aaf305 +Author: sivakumar subramani +Date: Sat Jan 8 05:20:32 2022 -0500 + + pb-2156: Added logic to default to KDMP for OCP rbd and cephfs provisioners. + +commit 25a9fe73f69bf48e434754984719c3ea4109973c +Author: sivakumar subramani +Date: Fri Jan 7 02:22:37 2022 -0500 + + pb-2149: vendor changes for kdmp from master branch + +commit a872471fa9c3e1f3975bdebee27f3449239c36d1 +Author: Aditya Dani +Date: Mon Jan 3 13:39:27 2022 -0800 + + Raise an event if cluster pair delete fails. + + - Do not remove the finalizer if cluster pair delete fails from the driver's perspective. + - Retry the operation in the next reconcile loop. + +commit a89660147f853ce6903276fc23603725e085cfde (origin/pb-2011) +Author: sivakumar subramani +Date: Tue Jan 4 14:20:49 2022 -0500 + + pb-2151: Added check to make sure zone array is not empty in preparePVCResourceForApply + +commit 553ab59745655eef724d93394b2cdbe8c1d006c8 +Author: diptianjan +Date: Fri Dec 24 09:42:27 2021 +0530 + + [PB-2143]: Making pvc size same as csi snapshot size to avoid the clone failure. + +commit 40510a2cf18ea1350235a4c0e52c9600830fddba +Author: sivakumar subramani +Date: Tue Dec 28 23:01:39 2021 -0500 + + pb-2118: Added changes to support cross-region backup in native GKE + driver. + + - Update the logic to take the destination zone and region in the + StartRestor function. + - Updating the PVC resource with the nodeselected annotation from + the destination cluster based on the region/zone selected for + restore.
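A Go sketch of the zone pinning described in the multi-zone and pb-2118 commits above: the restore PVC is annotated with a node from the target zone so delayed-binding provisioners create the volume there. Picking nodeName from the destination zone is assumed; the annotation key is the standard one used for delayed binding:

package sketch

import corev1 "k8s.io/api/core/v1"

// pinPVCToNode pins a restore PVC to a node in the desired zone via the
// selected-node annotation.
func pinPVCToNode(pvc *corev1.PersistentVolumeClaim, nodeName string) {
	if pvc.Annotations == nil {
		pvc.Annotations = map[string]string{}
	}
	pvc.Annotations["volume.kubernetes.io/selected-node"] = nodeName
}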
+commit d92c49f0fce3cf7cc767185858a3455f0b7bdfde +Author: sivakumar subramani +Date: Fri Dec 24 10:41:11 2021 -0500 + + vendor changes for kubernetes pkg + +commit 35c1a8a55fe67e0a260ccce178158bf618f7e234 (origin/siva-zone-map, origin/siva-tz, origin/siva-reconcile-dec19, origin/siva-gke-cross-reg, origin/master_aws_cross_region) +Author: sivakumar subramani +Date: Fri Dec 17 09:07:27 2021 -0500 + + pb-2131: vendor latest kdmp from the master branch. + +commit 87099e7fa665325ea1f210177a2691c3fbce3478 +Author: diptianjan +Date: Mon Dec 13 17:28:23 2021 +0530 + + PB-1079: Don't list the related clusterrole and clusterrolebinding of a service account + if the user does not have permission for that namespace. + +commit bf7971a45ad320fda421caf8233af4abacf70812 +Author: Luke Pitstick +Date: Wed Dec 15 17:41:10 2021 -0700 + + STOR-516 only try to create a clusterpair if a token is provided + +commit 52d82cd78bbc2a53b0e4d9840153b908794f923e +Author: Prashanth Kumar +Date: Thu Dec 16 00:18:18 2021 -0500 + + Passing kdmp-config map name for kdmp job delete + +commit 97686573a69cf4ba783f1ba5a6a876274ce7ed8e +Author: Prashanth Kumar +Date: Thu Dec 16 00:18:59 2021 -0500 + + vendor update for kdmp + +commit b7e24a0a132d74859b615477cbe4fb37fab832ad +Author: diptianjan +Date: Thu Dec 16 00:22:11 2021 +0530 + + [PB-2133]: Checking for waitforfirstConsumer if pvc's storage class has that set as volumebindingmode. + +commit 43d8125b338591ad7d467b4f528396425e4be80e +Author: Prashanth Kumar +Date: Tue Dec 14 08:12:45 2021 -0500 + + As part of restore if a user selects few PVC's to be restored, filter the + non-selected PVC's from includeResource map. This is applicable for + the case where Retain is selected. + +commit fc184ff7afb8a367663ac65beb4614d91370a4e6 +Author: Aditya Dani +Date: Mon Dec 13 14:21:15 2021 -0800 + + Set the new storage class on the PVC during ApplicationRestore + + - If the storage class mapping is set on ApplicationRestore and the PVC's + source storage class is found in this mapping, then on the destination cluster where + the PVC is being recreated set the new storage class obtained from this mapping. + + Signed-off-by: Aditya Dani + +commit b928b4459c336c0b55f869a42a0a5d1841abd090 (origin/pb-2126) +Author: Prashanth Kumar +Date: Tue Dec 14 01:15:14 2021 -0500 + + vendor update for kdmp + +commit 7fc6acdc30fa79b0fdd6e50ac80e5028722a0df9 +Author: sivakumar subramani +Date: Mon Dec 13 06:37:57 2021 -0500 + + pb-2125: Added check for nil SC in getRestorePVCs + +commit b7e52e497c1cc39402b7cae9962c05843830c784 +Author: sivakumar subramani +Date: Sat Dec 11 11:35:09 2021 -0500 + + pb-2113: Setting storageclass to nil in pvc, if it is empty. + + - The default storageclass configured on the setup will be + picked up only when the storageclass is not set. + - An empty string as the pvc storageclass will not select + the default storageclass configured on the cluster. + +commit 3e09549170c56aa5ca4bb55ea631a1b9fd65ef98 +Author: sivakumar subramani +Date: Sat Dec 11 05:48:52 2021 -0500 + + pb-2113: vendor changes for kdmp from master branch. + +commit a8542eec3af49ed816f69080c3db05e9a71db340 +Author: sivakumar subramani +Date: Fri Dec 10 11:38:11 2021 -0500 + + pb-2116: Added kubernetes.io/azure-file provisioner in + csiDriverWithoutSnapshotSupport + + - moved the check to the pv.Spec.CSI section in CreateSnapshot up, + immediately after getting the pv content. + - Added a note to mention that filestore.csi.storage.gke.io gke + filestore supports snapshots.
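The pb-2113 distinction above, as a small Go sketch: an empty-string storage class pins the PVC to "no storage class", while a nil pointer lets the cluster's default class apply, so empty strings are reset to nil on restore:

package sketch

import corev1 "k8s.io/api/core/v1"

// normalizeStorageClass resets an empty-string storage class to nil so the
// cluster's default storage class is selected for the restored PVC.
func normalizeStorageClass(pvc *corev1.PersistentVolumeClaim) {
	if pvc.Spec.StorageClassName != nil && *pvc.Spec.StorageClassName == "" {
		pvc.Spec.StorageClassName = nil
	}
}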
+commit 2d6758d5b2a104066450d69bbf8f40cb0c308daf +Author: diptianjan +Date: Fri Dec 10 11:06:59 2021 +0530 + + vendoring in latest kdmp. + +commit 4ab3a2976f98dba9a69aa313d392ea20232d35f9 +Author: sivakumar subramani +Date: Wed Dec 8 09:03:56 2021 -0500 + + pb-2110: Fixed the issue in return value of IsCSIDriverWithoutSnapshotSupport api for non-csi case + +commit 76e500ff1027b5cb781050d5e0f87752f28ee5ec +Author: sivakumar subramani +Date: Tue Dec 7 13:24:43 2021 -0500 + + pb-2101: vendor latest from master kdmp branch. + +commit 455059e68e28fd8bbc76fd59b789d4855dd116c9 +Author: sivakumar subramani +Date: Sat Dec 4 12:56:45 2021 -0500 + + pb-2098: Added google file and azure file check to default to KDMP + driver. + + - Removed IsDriverWithoutSnapshotSupport and + isCSISnapshotClassRequired. + - Added an IsCSIDriverWithoutSnapshotSupport common API in volume pkg + and adding it in kdmp and csi driver. + - Also added csiDriverWithOutSnapshotKey configmap field to get user + input for drivers that do not support snapshots. + +commit 253823493dfc8193627994edd4a4fd1ab1ae4ba9 (tag: v2.8.1) +Author: diptianjan +Date: Mon Dec 6 21:14:43 2021 +0530 + + [PB-2104]: Using , as the delimiter for the composite string of volumesnapshot. + +commit 798dedbbc8fd13ff15ddaa3c18c7ef12733a3703 +Author: diptianjan +Date: Thu Dec 2 16:55:32 2021 +0530 + + [PB-2094]: Preventing re-adding of volumesnapshotname in the applicationrestore CR. + +commit e18d7ab585343b8f9f4d9670872e0f620dc62f42 +Author: Prashanth Kumar +Date: Fri Dec 3 07:26:25 2021 -0500 + + Fixing kdmp restore across Azure regions + - As part of the restore, fetch the driver from the vol info instead of calling GetPVDriver() + - Removed volume.kubernetes.io/selected-node annotation from the PVC spec or else the Azure PVC + always tries to get bound to the same node, which might not be present + +commit ed6344395196bf16ea7ec98b326b9e745d611df2 +Author: sivakumar subramani +Date: Fri Dec 3 08:25:53 2021 -0500 + + pb-2101: Fixed the issue in isCSISnapshotClassRequired function such + that for non CSI volumes, it will directly go to the kdmp backup + + - Added hostaccess scc in the backup job for ocp platform. + +commit b3ef06d0ac77aa2fcb27525c813faf7377a116ec +Author: diptianjan +Date: Mon Nov 29 20:22:45 2021 +0530 + + Vendoring in latest kdmp changes.
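A Go sketch of the provisioner check behind pb-2098 and pb-2116 above: provisioners known to lack snapshot support are routed to the generic kdmp backup path. The set here is illustrative only; the real list also honors the csiDriverWithOutSnapshotKey configmap override:

package sketch

// snapshotlessProvisioners holds provisioners without snapshot support.
var snapshotlessProvisioners = map[string]bool{
	"kubernetes.io/azure-file": true, // per pb-2116
	// Note: filestore.csi.storage.gke.io supports snapshots and so is
	// deliberately not listed here.
}

// defaultToKDMP reports whether a volume from this provisioner should take
// the generic kdmp backup path instead of a native snapshot.
func defaultToKDMP(provisioner string) bool {
	return snapshotlessProvisioners[provisioner]
}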
+
+commit 4971dde10020b335b378ab69fe3ef5f4201d7cfb (origin/pb-2065-fix)
+Author: Ram Suradkar
+Date: Wed Nov 24 05:14:13 2021 +0000
+
+ revert clusterrolebinding collection based on user access to SA
+
+ Signed-off-by: Ram Suradkar
+
+commit b0a612a2c0c1562db1bf6148dabd6af8d5f790ac
+Author: Ram Suradkar
+Date: Tue Nov 23 18:41:36 2021 +0000
+
+ PB-2079: collect clusterrolebinding subjects if sa is not found
+
+ Signed-off-by: Ram Suradkar
+
+commit 2844ee938a69e354583d10ec73b649042dbfd424 (origin/pb-2075)
+Author: Prashanth Kumar
+Date: Mon Nov 22 09:07:14 2021 -0500
+
+ vendor update from kdmp master
+
+commit 911f9b4d7a0103f24e7b296581698511aaa90778
+Author: Prashanth Kumar
+Date: Sun Nov 21 14:28:31 2021 -0500
+
+ Skipping namespaces which are not part of the restore ns mapping
+
+commit d5f1968e0c6ccd80472e968d3e35f904464c4ee0
+Author: Prashanth Kumar
+Date: Fri Nov 19 12:59:55 2021 -0500
+
+ Adding env variable to specify marketplace deployment
+ - user can set env MARKET_PLACE=aws to specify that stork is being deployed on
+ the AWS marketplace so that the appropriate kopia executor image is picked from
+ the marketplace repository
+
+commit 996b135ce10ee46602d1d6b3ed64259c0afa7659
+Author: Aditya Dani
+Date: Thu Nov 11 03:51:58 2021 -0800
+
+ STOR-528: CSI VolumeSnapshotClass fixes
+
+ - If the SnapshotClassName is set as "default" or "Default"
+ let Kubernetes handle the snapshot class selection. It will use
+ the default snapshot class set by the admin for that CSI provider.
+
+ - Do not create stork-* volume snapshot class on startup if a snapshot
+ class for the CSI driver already exists.
+
+commit 3b570e260980728db57a736cfb38ad880c25c929
+Author: Prashanth Kumar
+Date: Sat Nov 20 08:54:20 2021 -0500
+
+ vendor kdmp master changes
+
+commit 102cb76fa35c17c3814ea8397651361b9425f6b8
+Author: sivakumar subramani
+Date: Fri Nov 19 13:20:30 2021 -0500
+
+ pb-2061: removed the incorrect error handling while calling the
+ runtimeclient.ObjectKeyFromObject function.
+
+commit f26546d3c2e02365ed290cf918c5b67ed7c0daae
+Author: diptianjan
+Date: Sun Nov 14 23:30:03 2021 +0530
+
+ [PB-2010]: support for replace policy in generic restore.
+ Volume status to become RETAINED if replace policy is set to retain.
+
+commit 621a7a06d7f89eac70c91069c0faff2b594dbc9f
+Author: sivakumar subramani
+Date: Wed Nov 17 07:49:54 2021 -0500
+
+ pb-2036 Resetting restore PVC's PV to nil irrespective of storageclass map presence.
+
+commit c80e6a289a46f35565a93a5f3d2af31790c5ee5c
+Author: sivakumar subramani
+Date: Tue Nov 16 13:29:10 2021 -0500
+
+ vendor changes for kdmp repo from master branch
+
+commit 57ffbba72a69f1ae55c735492da092ee6d636b3a
+Author: Aditya Dani
+Date: Wed Nov 17 17:07:22 2021 +0530
+
+ Change the pull request template
+
+commit f34c13b723e3182564f383a7f2c4585a20ed05c4
+Author: diptianjan
+Date: Mon Nov 15 21:55:59 2021 +0530
+
+ [PB-2030]: pv/pvc should be included as resources in the kdmp restore list.
+
+commit 99e44c8183308007b8093395b9207fcabcf9cc5f (origin/pb-2016)
+Author: Aditya Dani
+Date: Thu Nov 11 02:54:13 2021 -0800
+
+ STOR-556: OCP Migration improvements.
+
+ - Do not collect DeploymentConfigs owned by operators.
+ - Do not collect the openshift-service-ca.crt config map created by
+ Openshift in every namespace.
+
+commit 738d7529eb9fb0531fae14ea7973276e1f8e2f05 (origin/pb-2023)
+Author: sivakumar subramani
+Date: Sun Nov 14 15:56:02 2021 -0500
+
+ pb-2020: Added check in assigning SnapshotStorageClass in the dataexport CR
+
+ - Assign the SnapshotStorageClass in the DE CR only when
+ LocalSnapshotRestore is true, in which case we will try the
+ local snapshot restore.
+
+commit 6421f416e268a3e57c6eaed86a304f644deb2666
+Author: diptianjan
+Date: Sat Nov 13 23:49:36 2021 +0530
+
+ Vendoring in latest kdmp changes.
+
+commit e02f28e240e975461486adac2fa2e982db5152ef
+Author: diptianjan
+Date: Sat Nov 6 00:07:47 2021 +0530
+
+ [STOR-513]: support for local restore for csi volumes.
+
+commit dd2092c53a724c1253220130ff37a97949549cd8
+Author: sivakumar subramani
+Date: Fri Nov 12 16:47:05 2021 -0500
+
+ pb-2015: remove the storage-class and storage-provisioner annotations
+ from the pvc while restoring it.
+
+ - Even though we updated the spec.storageclass of the pvc to the
+ new storage class from the storage class mapping, it was not
+ getting bound while restoring. Kubernetes was referring to the old
+ storage class from the annotation and the pvc was stuck in
+ pending state.
+
+commit 9718d1ff9852f17595374d77b74296cbb3ae6118
+Author: sivakumar subramani
+Date: Fri Nov 12 21:24:17 2021 -0500
+
+ pb-2003: added an ordered way of classifying the driver in the GetPVDriver api
+
+ GetPVDriver is used in the resource restore path and we try to
+ avoid applying CSI pvcs again. Since an ordered list was not used in
+ this function, some volumes ended up in the kdmp driver and the
+ restore ended in partial success.
+
+commit 347ffbf96bb9d52708bc6a37afdaaaf8bc1198ab
+Author: diptianjan
+Date: Thu Nov 11 23:07:41 2021 +0530
+
+ Fixing volumesnapshot name and handling already exists for volumesnapshot and content.
+
+commit 0775ca36730abf3a638a2e493fa00c917e817962 (origin/pb-2009)
+Author: diptianjan
+Date: Wed Nov 10 11:11:27 2021 +0530
+
+ Putting the volumesnapshot class and backupUID in volume info as these are required in restore.
+
+commit 3b1295efc88bfd9abffd4ea800af36f7a253e732 (origin/stor-514)
+Author: diptianjan
+Date: Mon Nov 8 18:06:12 2021 +0530
+
+ vendoring in latest kdmp changes.
+
+commit c7d81c69e91f69634a531dfc431c86acf4edaacb
+Author: Prashanth Kumar
+Date: Sat Nov 6 07:30:43 2021 -0400
+
+ stor-547: Fixing crash when taking a backup of CSI and PXD vols
+ - If backupType is Generic in kdmp-config, use this for all drivers
+ except pxd to take generic backup
+
+commit 496fcd87ccdd3dc9b6bef491c1c476b0d9093d09 (origin/stor-2003)
+Author: diptianjan
+Date: Wed Nov 3 19:20:26 2021 +0530
+
+ [STOR-551]: pvc creation for kdmp restore should happen in the kdmp controller.
+
+commit f865cb5dc48b400fd6b15d4a506ff1d0d261944f
+Author: siva-portworx
+Date: Fri Nov 5 05:24:20 2021 -0400
+
+ vendor kdmp changes from master branch
+
+commit f496102140c3ee3172bdd769d1efa8f4e8e1a3a5
+Author: siva-portworx
+Date: Thu Nov 4 23:20:44 2021 -0400
+
+ vendor changes for openstorage repo from release-9.1 branch.
+
+commit 34afda6d8f143e7aafbac71fc175ddd8a7591b32 (origin/stor-545-vendor-issue)
+Author: siva-portworx
+Date: Tue Nov 2 05:23:36 2021 -0400
+
+ pb-1997: Added check not to initialize the snapshot class in the DE, if
+ CSI does not support snapshots or in cases where we want to skip CSI and
+ take a generic backup (proxy-volume)
+
+commit 88ec148875262be67266d259a228dff0bebc7023
+Author: siva-portworx
+Date: Fri Oct 29 06:10:58 2021 -0400
+
+ stor-514: Added checks such that pure FB and vsphere PVCs will be
+ defaulted to the kdmp driver for snapshots.
+
+commit 088d8fdeaa377052ced2b24b40c7de60009c9040
+Author: siva-portworx
+Date: Wed Nov 3 01:42:31 2021 -0400
+
+ vendor changes of kdmp from master branch
+
+commit c5afc04d779bdd4316f13c759de19809c27c1d49
+Author: siva-portworx
+Date: Mon Nov 1 07:39:57 2021 -0400
+
+ pb-1988: Added alreadyExists check for creation of volumeSnapshot and
+ PVC creation
+
+ - While deleting the snapshot CRs, setting the proper retain
+ value. For the latest snapshot, retain will be set to true, so that the local
+ snapshot will be retained. For other snapshots, it will be set to
+ false, so that both the CR and the local snapshot files
+ will be deleted
+
+commit 3eba3cbbe1ebd31a077be0787c977552c2f29203
+Author: siva-portworx
+Date: Tue Nov 2 13:59:23 2021 -0400
+
+ stor-545: Added provisioner and volumesnapshot fields in the applicationbackup CR.
+
+commit de495f217e7c9a7b646ac8cc385e7bdd926548f6
+Author: siva-portworx
+Date: Tue Nov 2 13:57:50 2021 -0400
+
+ stor-545: vendor for kdmp from master branch
+
+commit ffa64d33f81da4e239a05a59dbb33f58b95377cf (origin/pb-1988)
+Author: siva-portworx
+Date: Thu Oct 28 07:02:09 2021 -0400
+
+ vendor latest kdmp from master branch
+
+commit 6b1ee47a38b7a0f79b3204e0dfa3a6826f7fc814
+Author: Prashanth Kumar
+Date: Tue Oct 26 14:07:45 2021 -0400
+
+ Changed "generic" type string to "Generic"
+
+commit 477a879b25d5220680b8bce278ca617cfd1bd188 (origin/pb-1982-fix, origin/pb-1982-1)
+Author: diptianjan
+Date: Mon Oct 25 16:49:17 2021 +0530
+
+ Vendoring latest kdmp changes.
+
+commit 964e1d8300eb989d9b0045b1b2407cb5313550e7
+Author: diptianjan
+Date: Mon Oct 4 20:03:04 2021 +0530
+
+ [STOR-481]: Uploading CRs in csi generic backup.
+ Made the required changes as part of the snapshotter package.
+
+commit 6883a2cd0a78dbdae38110a91a7e562593ed2011
+Author: siva-portworx
+Date: Sat Oct 23 01:12:02 2021 -0400
+
+ pb-1981: Adding kdmp CRs to skip during applicationregistration.
+
+commit 76dec1e5d808b0537e5e7fe39971128a9574a79b (origin/pb-1982)
+Author: Ram
+Date: Fri Oct 22 14:25:18 2021 +0530
+
+ STOR-537: Reverse Migration is failing for PX pv objects
+
+ - improve volume bound time
+ - skip pv update if respective pvcs are skipped
+
+ Signed-off-by: Ram
+
+commit c9b0d02b75a97e9f92d968e0c9bd0e8c03d6cc51
+Author: siva-portworx
+Date: Fri Oct 22 03:55:19 2021 -0400
+
+ pb-1978: Adding deletionTimestamp check before calling delete for the dataexport CR.
+
+commit ec2e7d3eddf9b9879fa8c07509819030bddb02a9
+Author: Prashanth Kumar
+Date: Thu Oct 21 07:31:49 2021 -0400
+
+ Reading backup type from config map
+ - User can set all backups to be generic by adding BACKUP_TYPE=generic to the kdmp-config map
+ - This way all backups would be forced to be generic
+
+commit 89fefe24385bf17e853cf76cd075b01b587b99d2 (origin/pb-1966)
+Author: Aditya Dani
+Date: Thu Oct 21 14:21:32 2021 -0700
+
+ Remove wget from Dockerfile and use curl instead
+
+ - Reduces the vulnerabilities reported by DTR
+ from:
+
+ 15 critical
+ 58 major
+ 25 minor
+
+ to:
+
+ 7 critical
+ 27 major
+ 22 minor
+
+commit a5fefcb5309db5da8558b465b40ef006b233a7e2
+Author: Aditya Dani
+Date: Thu Oct 7 15:40:26 2021 -0700
+
+ Modified the decision making process for choosing a driver for a PVC.
+
+ - Create an ordered list of drivers defined in STORK. For every PVC
+ stork will check whether these drivers can handle the PVC. If all the other
+ drivers fail, the last one in the list - KDMP - will pick it up.
+ - The goal is that KDMP should handle all kinds of PVCs for all the different
+ workflows that stork supports.
+ - This also means that for APIs not currently supported by the KDMP driver,
+ the respective operation will fail if none of the other drivers support that PVC.
+
+commit da58d384827f250ee6c5fca50e52833253d236ae (origin/pb-1975)
+Author: siva-portworx
+Date: Tue Oct 19 07:49:01 2021 -0400
+
+ stor-529: Calling cleanupResources as part of the finalizer handlers in the applicationbackup controller code
+
+commit 4504d85f717aeedaa895741c517517d2445ca643
+Author: Prashanth Kumar
+Date: Wed Oct 20 00:30:27 2021 -0400
+
+ vendor update for kdmp
+
+commit 79a787fa7fac963177a7cf7c00b96a6fb19b7f2a
+Author: Ram
+Date: Mon Oct 18 23:30:41 2021 +0530
+
+ Remove stork-scheduler version update from integration test script
+
+ Signed-off-by: Ram
+
+commit 09f413387529b9395bbc12eed41715610a2f2272
+Author: Ram
+Date: Thu Oct 14 19:24:32 2021 +0530
+
+ update migration status for each pv
+
+ Signed-off-by: Ram
+
+commit b34b23c22405e93bb9b8bbd3d34727c3a5db6c9f
+Author: Ram
+Date: Thu Oct 14 14:22:18 2021 +0530
+
+ STOR-530: Unable to take backup on GKE 1.21 cluster
+
+ fix deprecated zone label for pv
+
+ Signed-off-by: Ram
+
+commit c73c5b896fece5123977158b54a9b0bf0133205f
+Author: Prashanth Kumar
+Date: Mon Oct 18 09:59:32 2021 -0400
+
+ vendor update for kdmp
+
+commit 8dfb41df7cd9df0b30a558b6feeb4cba821e56f1 (origin/pb-1962)
+Author: siva-portworx
+Date: Tue Oct 12 15:41:54 2021 -0400
+
+ vendor changes for kdmp from master branch
+
+commit 55ebe4dca9bce5543d636e459315cfe9d8c5c220
+Author: siva-portworx
+Date: Mon Oct 11 14:23:55 2021 -0400
+
+ pb-1802: Added steps to create the kdmp-config cm with default values.
+
+ - Initially creating the kdmp-config config map with the default
+ values needed for the kdmp job rate limits and the container
+ resource limits as well.
+ - Also added a check to include the Final stage in the failure check
+ of the kdmp driver.
+
+commit 5f7672036ecea70c0f0c18236f846a6b68dda51b
+Author: Aditya Dani
+Date: Tue Oct 12 13:01:44 2021 -0700
+
+ STOR-526: Use python 3.9.2 in stork image.
+
+ - Python 3.x versions up to 3.9.1 have a security vulnerability - CVE-2021-3177
+ - Updated the python version in the container to 3.9.2
+
+commit dfae08b0e45a434756f2e90f52a7f741b7634b4a
+Author: Aditya Dani
+Date: Tue Oct 12 10:49:24 2021 -0700
+
+ STOR-527: Add nil check in GetPodVolumes portworx implementation.
+
+ - A PVC object can be empty if the PersistentVolume is directly
+ provided in the pod spec.
+
+commit 2d69baba429571c28e4ce6a3fca6d51889726e27 (origin/pb-1802-1)
+Author: Prashanth Kumar
+Date: Fri Oct 8 16:04:28 2021 -0400
+
+ stor-522 Fetch backup and restore size for data mover
+
+commit 943077c0288ba0aa44d6d933d43c97546c03fc34
+Author: Aditya Dani
+Date: Sun Oct 10 08:44:05 2021 -0700
+
+ STOR-398-v2: Add plural forms as short names for VolumeSnapshots and VolumeSnapshotDatas
+
+commit 09ff758c99ddf882f6877e6f80dbfac163a29dbe
+Author: Aditya Dani
+Date: Fri Oct 1 14:47:47 2021 -0700
+
+ vendor update from libopenstorage/openstorage
+
+commit 53e732e0cbc709cd06383824144fb35f09cb08aa
+Author: Aditya Dani
+Date: Thu Sep 30 17:31:36 2021 -0700
+
+ Portworx: Parse the storage class mapping while restoring a volume.
+
+ This change allows backing up a Portworx volume of a particular storage class
+ and properties, and restoring that volume with a different storage class.
+ For ex. Backup a volume with HALevel=2 and restore it with HALevel=1
+
+ - Parse the storage class mapping from the restore spec and check if a mapping
+ for the current PVC's storage class is found.
+ - If found, fetch the actual contents of the storage class and parse them into
+ a RestoreVolumeSpec.
+ - Invoke the CloudBackupRestore API with this RestoreVolumeSpec.
+
+commit ae7f793dcb5ced8f6c784fecf05271b8877d1a8a
+Author: Ram
+Date: Tue Oct 5 16:15:06 2021 +0530
+
+ ptx-1404: nit msg for storkctl activate migration on crds
+
+ Signed-off-by: Ram
+
+commit 32732ed798324413c228de8c2c88ac28a8bb4fcd
+Author: Ram
+Date: Tue Oct 5 16:14:43 2021 +0530
+
+ stor-520: handle nil entry for ns labels while restoring
+
+ Signed-off-by: Ram
+
+commit 21f38b24e9b2391ec8bb19c52789dd31b1cc9801
+Author: Prashanth Kumar
+Date: Wed Oct 6 22:14:40 2021 -0400
+
+ vendor update for kdmp - fixed job pending issue
+
+commit 28160efc05cbcfbd6d97550ec14193a6886571f0
+Author: Ram
+Date: Tue Oct 5 15:53:56 2021 +0530
+
+ stor-509: Update pvc uid mapping while migrating pv objects
+
+ A CSI PVC does not go into bound state on the DR cluster if the pv claimref has
+ an invalid pvc uid
+
+ Signed-off-by: Ram
+
+commit 95179147a087d3ce386e4fbd224de1df998d8c59
+Author: Prashanth Kumar
+Date: Wed Oct 6 01:15:09 2021 -0400
+
+ vendor update for kdmp
+
+commit c670be7925d2c605a199c849695d86fae48cc77b (origin/pb-1856)
+Author: Aditya Dani
+Date: Mon Oct 4 14:26:18 2021 -0700
+
+ KDMP: Before starting a restore check if the destination PVC is in Bound state.
+
+ - Check both that pvc.Spec.VolumeName is not empty and that the PVC status is Bound.
+
+commit bcf7e2bfd81f36fc9c8abdb403fe6c27bef1093a
+Author: Aditya Dani
+Date: Mon Oct 4 09:25:16 2021 -0700
+
+ Portworx: Check for auth params in the in-tree storage class definition as well.
+
+commit f7b00bd26bb32c6ede8b707b74e9499eebbe4a04
+Author: siva-portworx
+Date: Mon Oct 4 10:06:15 2021 -0400
+
+ pb-1941: remove the fix of changing the status to Inprogress for calling cleanupResources.
+
+commit a59d39773a96e6a40f544f5db73d3be6f95be887
+Author: siva-portworx
+Date: Mon Oct 4 09:26:54 2021 -0400
+
+ vendor changes for kdmp repo master
+
+commit 264b68e6821ea71cb8ecb31a1e5491c07b56d51c
+Author: siva-portworx
+Date: Sat Oct 2 11:24:13 2021 -0400
+
+ pb-1941,pb-1944: modified the generic backup resource name to match the
+ new naming format as given below
+
+ - name format ---
+ - Calling the cleanup function in the Final case is creating an issue, as
+ px-backup will delete the CR as soon as stork updates the CR
+ status to success or failure and the stage to Final. Because of that the
+ application backup CR is getting deleted before the dataexport CR.
+ - Now calling cleanup before moving the stage to Final and the status
+ to success.
+
+commit 7e90929602d8adf5b26bb6e09a1f1510134f0045
+Author: siva-portworx
+Date: Fri Oct 1 16:13:50 2021 -0400
+
+ vendor changes for kdmp repo.
+
+commit 2c7dd9dff943380dbf39e3ce990de513e711caa1
+Author: siva-portworx
+Date: Thu Sep 30 13:33:24 2021 -0400
+
+ pb-1941: added pvc UID to be part of volumeInfo in the ApplicationBackup and ApplicationRestore CR definitions
+
+commit 5251ee38dcae0eeddf25d9293cb4378f76435749
+Author: Aditya Dani
+Date: Sat Sep 25 08:25:19 2021 -0700
+
+ vendor updates from kdmp
+
+commit 67dbf03ec05d2c43aedff60f173cdf1a0b17daf6
+Author: Aditya Dani
+Date: Fri Sep 24 09:34:24 2021 -0700
+
+ Add support for handling snapshots in the KDMP driver.
+
+ - Pass the snapshot storage class in the DataExport CR when provided
+ in the ApplicationBackup CR.
+
+commit f67574376f8814491755a77fb48ea0ab10dbd223
+Author: Aditya Dani
+Date: Mon Sep 27 17:04:00 2021 -0700
+
+ STOR-398: Add short names for VolumeSnapshot and VolumeSnapshotData CRs.
+
+ - vendor update from libopenstorage/external-storage
+
+commit 7803c7adc1c39c1e4434b2500a945c9cce816cea (origin/pb-1916-1)
+Author: Ram
+Date: Wed Sep 29 10:55:24 2021 +0530
+
+ [kdmp] remove postfix special chars while labelling crs
+
+ - address review comments
+
+ Signed-off-by: Ram
+
+commit fca8c6636db9db23345c1148fcdb24638c914e64
+Author: Ram
+Date: Tue Sep 28 20:52:32 2021 +0530
+
+ Fix issue of multi-ns backup and restore for the kdmp driver
+
+ stor-512: restore completed as partialsuccess for generic backup
+
+ Signed-off-by: Ram
+
+commit 41bbb1103ccaf60ddc249f91c6ce6cee9f35dc2b
+Author: Ram
+Date: Tue Sep 28 12:57:37 2021 +0530
+
+ Add CleanupResource api for backup/restore
+
+ Signed-off-by: Ram
+
+commit e59eac2d59151994889f5d1ce9cb6442035fcbd4
+Author: Ram
+Date: Mon Sep 27 10:56:24 2021 +0530
+
+ Cleanup DataExport CR after backup/restore
+
+ Signed-off-by: Ram
+
+commit 4ae2c86c8db63740114b972862dac0fc7fa2f653
+Author: Prashanth Kumar
+Date: Tue Sep 28 03:16:13 2021 -0400
+
+ vendor update for kdmp for ssl enable/disable
+
+commit 326cd5363a2a2da9bf83c36de0527849e15b3fb4
+Author: siva-portworx
+Date: Tue Sep 28 01:58:06 2021 -0400
+
+ stor-510: Not classifying proxy-volume as pxd, even if the portworx provisioner annotation is set
+
+commit 8aba1590582a3f9695602e7b67eacb4a333bea2b
+Author: Ram
+Date: Fri Sep 24 22:51:25 2021 +0530
+
+ Vendor updates sched-ops
+
+ Signed-off-by: Ram
+
+commit df8c2cf49691750255fd2528b613dd143df1d5d6
+Author: Ram
+Date: Fri Sep 24 22:50:58 2021 +0530
+
+ Update webhook configuration for v1.22+ k8s
+
+ Signed-off-by: Ram
+
+commit 5649d7905f279dbb6de84f3d4c874abe62cbaec8
+Author: siva-portworx
+Date: Mon Sep 27 13:11:03 2021 -0400
+
+ stor-506: Added workaround to delete the dataexport CR at the end, when all the snapshots are completed.
+
+commit 004803657a35ef8784279b7a60be061e9aff11c1
+Author: siva-portworx
+Date: Sat Sep 25 17:09:57 2021 -0400
+
+ pb-1908: truncating the CR name and pvc names to 63 chars, while adding them as labels in the dataExport CR
+
+commit 81341fbebede49e2c87ab114a2eba1df81a1267a
+Author: siva-portworx
+Date: Sat Sep 25 16:33:58 2021 -0400
+
+ vendor changes for truncate pkg
+
+commit 68d023ca0411fcf1430dfed3cd6c568d3f57fd0c
+Author: siva-portworx
+Date: Fri Sep 24 14:14:26 2021 -0400
+
+ pb-1908: Added backup and restore CR name and pvc name in the dataexport
+ CR.
+
+commit 11a01171d12169d7c4b9473fe9c2a70c3fcb91d1
+Author: Prashanth Kumar
+Date: Mon Sep 20 13:44:40 2021 -0400
+
+ Handling kdmp in-progress job cancellation and deletion based on RetentionPolicy
+
+commit ba2e13b85471aa80b965d70d774cbb93a6dd1007
+Author: Prashanth Kumar
+Date: Sun Sep 19 21:47:36 2021 -0400
+
+ vendor update for kdmp
+
+commit 97089f8df6351ee79232d83580492f7c11b28572
+Author: Aditya Dani
+Date: Tue Sep 7 17:26:42 2021 -0700
+
+ Add a Snapshotter interface.
+
+ - The snapshotter interface provides APIs for snapshots and restores.
+ - Extracted the snapshotting functionality out of the CSI driver and added it
+ to the snapshotter interface
+
+ The goal is to separate the snapshot functionality from the actual drivers so
+ that different components in stork can use it.
+
+commit 3d3c55483ac9eac974fd51d53cec64911c0b6682
+Author: Aditya Dani
+Date: Tue Sep 21 17:48:16 2021 -0700
+
+ STOR-425: Do not silently fail cluster domain update commands for an invalid domain.
+
+ - Check if the input domain is part of the existing cluster domains. If it is not,
+ fail the activate/deactivate command.
+
+ Signed-off-by: Aditya Dani
+
+commit 324b5fbca9199057acf1ac88e6b5647b1f1ed12d
+Author: Aditya Dani
+Date: Tue Sep 21 16:45:59 2021 -0700
+
+ STOR-483: Skip filtering for volumes which do not have DataNodes populated.
+
+ - Certain storage providers like Portworx proxy volumes or Portworx direct access volumes
+ do not have the volumeInfo.DataNodes populated. Do not filter out the request for
+ such volumes.
+
+commit d6c398ec818b224cf2872be82cf684c0ec94ff25
+Author: Aditya Dani
+Date: Mon Sep 13 15:58:05 2021 -0700
+
+ [Portworx Driver] Add support for PX-Security enabled clusters.
+
+ - Add support for backing up and restoring PVCs which are provisioned
+ on a PX-Security enabled cluster.
+
+ Backup:
+ - CSI Volumes: Use the token secret name/namespace specified
+ as a part of NodePublish secrets
+ - In-tree Volumes: Use the token secret name/namespace provided as annotations
+ - Save the secret name and namespaces as a part of backup.VolumeInfo.Options
+ so that they can be used while restoring.
+ - If a templatized namespace is provided, then store ${pvc.namespace} as a part
+ of backup.VolumeInfo.Options so that on restore the token secret in the namespace where
+ the restore is being done is used.
+
+ Restore:
+ - Use the token secret name and namespace provided as a part of the backup options.
+ - If a templatized namespace is found use the secret in the namespace where the
+ PVC is being restored.
+
+commit 416cc7f6bb67e6dfbc7d798e2e6edc150dd22db0
+Author: Ram
+Date: Mon Sep 20 17:03:24 2021 +0530
+
+ STOR-444: Update FinishTimestamp for failed applicationbackup
+
+ stor-448: update labels & annot of namespace based on replace policy
+ stor-443: update activate/deactivate message for crds
+
+ Signed-off-by: Ram
+
+commit 2f877c0c4f539d892e0ad6809ba38a0d4c0a5f79 (origin/master-siva-kdmp)
+Author: Ram
+Date: Wed Sep 22 16:29:08 2021 +0530
+
+ csidriverfix: handle completed backups
+
+ Signed-off-by: Ram
+
+commit be7a00a6e6a32853c2d20e581ffb41f46deed480
+Author: Rohit-PX
+Date: Tue Sep 21 17:58:55 2021 -0700
+
+ Add auth annotations to migrations triggered by migrationschedules
+
+ Signed-off-by: Rohit-PX
+
+commit 46d39c7ba17439ac098c8c2140e74c89ffb1f90f
+Author: Aditya Dani
+Date: Tue Sep 21 18:04:13 2021 -0700
+
+ STOR-499: ApplicationRestores fail if there is a mix of CSI PVCs and other storage providers.
+
+ - Skip the volumes which are not CSI PVCs in the CSI driver restore path.
+
+commit 95b5446437ef4264a4c4761961b4263eac1d1d6a
+Author: Aditya Dani
+Date: Fri Sep 17 12:44:30 2021 -0700
+
+ STOR-459: CSI: Skip restore of PVCs if the driver is not CSI
+
+commit 238047cfa43fc2bbb9523daa14c9b3acc86ed834 (origin/master-temp-siva)
+Author: Ram
+Date: Wed Sep 15 22:54:48 2021 +0530
+
+ vendor updates - kdmp
+
+ Signed-off-by: Ram
+
+commit 0bba70b9c361c9c0537e391489b9bf761084d8bc
+Author: Ram
+Date: Wed Sep 15 22:55:23 2021 +0530
+
+ stor-455: Generic restore support
+
+ Signed-off-by: Ram
+
+commit 40e5288a9f38f5433a86789e577c310b0c8eb91c
+Author: Ram
+Date: Thu Sep 16 19:32:23 2021 +0530
+
+ stor-485: add completed backup volume to backup list
+
+ Signed-off-by: Ram
+
+commit 15fea41a173d59d1a0046288e2f910a1ada74471
+Author: Ram
+Date: Wed Sep 8 19:25:29 2021 +0530
+
+ Set KDMP custom images to latest tag
+
+ Signed-off-by: Ram
+
+commit 57f38c06d0be33c6eddc6db6668362d4aa916537
+Author: Ram
+Date: Wed Sep 8 11:16:01 2021 +0530
+
+ vendor updates
+
+ Signed-off-by: Ram
+
+commit b3519171190bae929abc05cbacab8c46e3a3ce22
+Author: Ram
+Date: Tue Sep 7 23:43:44 2021 +0530
+
+ start kdmp controller from stork
+
+ - implemented GetBackupStatus() for kdmp
+ - implemented DeleteBackup() for kdmp
+ - support generic backup type
+
+ Signed-off-by: Ram
+
+commit f3d706348018e98171982e167d09f250a2893280
+Author: Ram
+Date: Tue Sep 7 23:41:22 2021 +0530
+
+ vendor update schedops,kdmp
+
+ Signed-off-by: Ram
+
+commit 2eb4df7710e38e4539a80b3e63a0850b69ecf611
+Author: Ram
+Date: Mon Sep 6 22:06:14 2021 +0530
+
+ stor-462: detect pvcs to be collected for the kdmp driver
+
+ Signed-off-by: Ram
+
+commit 1cc475a3d99eb80bd33cc6241b7dca60ccd534b8
+Author: Ram
+Date: Mon Sep 6 22:05:19 2021 +0530
+
+ stor-463: vendor updates KDMP
+
+ Signed-off-by: Ram
+
+commit 62cec2a70e06d26382aa36378bf39525cb98fc68
+Author: Ram
+Date: Wed Sep 8 15:41:43 2021 +0530
+
+ allow listing of crds via v1beta1 apis
+
+ - stork tries to list crds via v1 apis, which is not supported
+ on older k8s versions
+
+ Signed-off-by: Ram
+
+commit 5a1f8a4142bf301efe146d065684e72c6f07a019
+Author: Rohit-PX
+Date: Thu Sep 2 10:50:12 2021 -0700
+
+ Add new parameter for kube-scheduler version
+
+ Signed-off-by: Rohit-PX
+
+commit a33dbd5084a7288e991946006c7c3082df2ba8c4
+Author: siva-portworx
+Date: Thu Sep 2 10:11:18 2021 -0400
+
+ stor-468: Changed the field name GenericBackupRepoKey to RepositoryPassword in the backuplocation CR
+
+commit e3ed875d73038e495dc45c2de1882bfbf8b9f068
+Author: siva-portworx
+Date: Wed Sep 1 04:32:47 2021 -0400
+
+ stor-467: Added genericBackupRepoKey field in the backuplocation CR for
+ storing the generic backup repo password.
+
+commit c2b5a3de3b0e25bb393079323d9aef0f92e00f05 (tag: v1.4.0)
+Author: Rohit-PX
+Date: Thu Aug 26 15:11:21 2021 -0700
+
+ Make version check optional in integration tests
+
+ Signed-off-by: Rohit-PX
+
+commit dbf03a3e170f6888124d71d9c43bbc984ec0527d
+Author: Rohit-PX
+Date: Wed Aug 25 11:42:59 2021 -0700
+
+ New test for volume snapshot restore with vdbench data
+
+ Signed-off-by: Rohit-PX
+
+commit 4ffc964e861ca7c1fba432d2e65e8d48ad448813
+Author: Ram
+Date: Mon Aug 23 23:56:48 2021 +0530
+
+ PB-1363: Unable to take csi + px volume backups
+
+ Signed-off-by: Ram
+
+commit 2a2a656c01729580f2a0666072898d750c103189
+Author: Aditya Dani
+Date: Tue Aug 24 22:30:37 2021 -0700
+
+ Added migration failover and failback tests.
+
+ - The existing migration tests set up one-way cluster pairs between two clusters,
+ however the migrations also happen only in one direction. Once the migration
+ is complete the resources are deleted and a reverse migration is triggered.
+ - This does not exercise the scenario of failover and failback of the same
+ application and volume between the two clusters.
+ - Currently the test is only added for the portworx driver, using portworx encrypted volumes with mysql statefulsets.
+
+commit 044b271694cafee5501da227a19bb4457f6e354b
+Author: Ram
+Date: Wed Aug 25 11:03:39 2021 +0530
+
+ check destination svc annotation before assigning keys
+
+ Signed-off-by: Ram
+
+commit 24078363cc2475142226d76c989f91ae6ff41e9c
+Author: Ram
+Date: Mon Aug 23 23:20:11 2021 +0530
+
+ Correct applicationbackup schema spec for BackupType
+
+ Signed-off-by: Ram
+
+commit fa823b8b7139e0aff62c4675ab9fe5e1c1f2176b
+Author: Ram
+Date: Mon Aug 16 20:18:56 2021 +0530
+
+ vendor updates
+
+ Signed-off-by: Ram
+
+commit f5abbcfc2627d89615649df9e427d0a45cb4a440
+Author: Ram
+Date: Mon Aug 16 20:18:36 2021 +0530
+
+ support v1 crd based on k8s version
+
+ Signed-off-by: Ram
+
+commit 670dcdf1439f167e0e61922dccb3d5b8e9139b8f
+Author: Aditya Dani
+Date: Wed Jul 28 14:38:18 2021 -0700
+
+ Fix the ClusterRoleBinding in prometheus specs.
+
+commit 4da3fdd765809e43ba29f2d39821aa46f97e6fbb
+Author: siva-portworx
+Date: Thu Aug 19 04:08:35 2021 -0400
+
+ stor-458: Added the following changes in the stork CRDs for the data mover feature.
+
+ - Added backupType in the ApplicationBackup and ApplicationBackupSchedule
+ CR definitions.
+ - Added storageClass in the ApplicationBackupVolumeInfo definition.
+
+commit 748124ffafcbcbb67b87cae27cf0f3c8bb311e43
+Author: Aditya Dani
+Date: Mon Aug 16 14:08:49 2021 -0700
+
+ Do not force delete pods in VolumeSnapshotRestore.
+
+ - Force deleting the pods immediately deletes the pod objects from the k8s etcd,
+ which gives a false indication to stork that the pods have been deleted. The
+ subsequent restore call fails since the underlying PVC is not yet detached by k8s.
+
+ Signed-off-by: Aditya Dani
+
+commit e0416ca19d6f2e05f050389cc643378e67468eb0
+Author: Ram Suradkar
+Date: Tue Aug 10 17:14:44 2021 +0000
+
+ move skipservicemodify check
+
+ Signed-off-by: Ram Suradkar
+
+commit 7511262ac7ed7a5ffc7372c8659f9be71ab698fc
+Author: Ram Suradkar
+Date: Tue Aug 10 14:55:01 2021 +0000
+
+ [portworx] wait for driver to come online in case of in-place restore
+
+ Signed-off-by: Ram Suradkar
+
+commit 6c1007168818a47db2f2a6ccd9ff9fbc1a2542f0
+Author: Ram
+Date: Tue Aug 3 16:41:59 2021 +0530
+
+ vendor update hashstructure
+
+ Signed-off-by: Ram
+
+commit b6ceadd67b1200f75dbb16c87d3bf7031c0953df
+Author: Ram
+Date: Tue Aug 3 16:31:05 2021 +0530
+
+ [migration] only update svc if source svc has been changed
+
+ Signed-off-by: Ram
+
+commit f7458002924ff2ddd455d6f536459a30d142ffa8
+Author: Ram
+Date: Thu Jul 29 18:58:47 2021 +0530
+
+ handle nil check for networkpolicy ipblock
+
+ Signed-off-by: Ram
+
+commit b3358f1952fc1c3e5719ae66fd8f32cb65c4da51
+Author: Aditya Dani
+Date: Fri Jul 23 23:50:06 2021 -0700
+
+ STOR-441: Migrations are deleting the backing PX volume.
+
+ With the recent changes for updating the PVC size as a part of Migrations we are now doing the following
+
+ - Update the reclaim policy on the PV to Retain
+
+ - Delete the PVC
+
+ - Recreate the PVC and PV
+
+ However due to a bug the Retain policy was not set and the volume was getting deleted.
+
+commit 2bdef2f941a58f96e29f6844c2ffbd699f2f41e0
+Author: Ram
+Date: Thu Jul 22 22:58:43 2021 +0530
+
+ Fix integration tests intervalScheduleCleanupTest
+
+ Signed-off-by: Ram
+
+commit f3707a86a1257bdc09f67ea08b6ecf25e2458260
+Author: Ram
+Date: Wed Jul 21 23:14:19 2021 +0530
+
+ stor-415: remove svc ports while cloning services
+
+ Signed-off-by: Ram
+
+commit 3e15faa6803788dbe45894c16c801b3ae17a6974
+Author: Ram
+Date: Tue Jul 20 18:30:09 2021 +0530
+
+ stor-435: Migration failing, pvc already exists
+
+ Signed-off-by: Ram
+
+commit d15d8f63d2e0a68ece8df1a9e7d2ddcf39e62156
+Author: Ram Suradkar
+Date: Wed Jul 21 14:28:10 2021 +0000
+
+ stor-439: Backup respective pv object for pvc resourcetypes
+
+ Signed-off-by: Ram Suradkar
+
+commit 470afc7e3bf8bd698ea002c99bb4fa22e76d2b63
+Author: siva-portworx
+Date: Wed Jul 21 11:12:40 2021 -0400
+
+ Added extra check such that we skip volume backup only if ResourceType is not empty and does not contain PVC in it
+
+commit 708689004a4503ca602e5f596be2c58545f853b4
+Author: Ram
+Date: Fri Jul 16 23:44:32 2021 +0530
+
+ Omit checking sha for stork version checks
+
+ Signed-off-by: Ram
+
+commit 5a50585661c03ff438c56eed7b2db6c9e2179585
+Author: Ram
+Date: Wed Jul 14 18:37:02 2021 +0530
+
+ stor-432: storkctl does not activate/deactivate mongodb CR's
+
+ Signed-off-by: Ram
+
+commit fd83b8a6e053f2bbee101dfc7495a76d34224544
+Author: Ram
+Date: Wed Jul 14 18:36:47 2021 +0530
+
+ use pluralizer rules to collect CRD
+
+ Signed-off-by: Ram
+
+commit 0922aafd843f0e8befbaa0257728e1eb67f4507e
+Author: Ram Suradkar
+Date: Sun Jul 11 06:46:23 2021 +0000
+
+ Removed incorrect return statement in the backupVolumes function
+
+commit d828f3b24c082ce3721d63a54abed3e2cfba393c
+Author: Aditya Dani
+Date: Fri Jul 9 11:02:13 2021 -0700
+
+ Add a nil check in volume snapshot schedule metrics reporting.
+
+commit adcbc5df0dea78b0621b1a9535910dfe3e836327
+Author: Aditya Dani
+Date: Thu Jul 8 17:10:55 2021 -0700
+
+ STOR-393: Add a SAN to the webhook certificate created by stork.
+
+ - Delete the old k8s secret which had the invalid cert and create a new one
+ on startup.
+
+commit 590fd09760ecbc3db988fffe86ba6d081539b055
+Author: Ram
+Date: Thu Jul 8 18:44:21 2021 +0530
+
+ Update service account resource instead of delete/create
+
+ Signed-off-by: Ram
+
+commit cfacfbf7ddcc8898aa426455d8cee68ff935e742
+Author: Ram
+Date: Wed Jul 7 17:19:10 2021 +0530
+
+ integration-test to verify stork version
+
+ Signed-off-by: Ram
+
+commit d044b4f0ca6a489280da3eebabc18524c542b4bf
+Author: Ram
+Date: Wed Jul 7 17:12:16 2021 +0530
+
+ stor-423: add mongodb cr backup/migration support
+
+ Signed-off-by: Ram
+
+commit b965cf79065ec2d4aa20c4ebc46aee55a144132a
+Author: Aditya Dani
+Date: Wed Jul 7 11:00:13 2021 -0700
+
+ Fix the issues caused by git merge conflicts.
+
+commit cfc0a9c0dcd6a9dccea0f8593d8ad0c621211350
+Author: Aditya Dani
+Date: Fri Jun 25 08:39:04 2021 -0700
+
+ Handle exponential backoffs for ApplicationRestores.
+
+commit f50b1ef595dc556833a41a8982aa7278bde2236a
+Author: Aditya Dani
+Date: Thu Jun 10 07:45:16 2021 -0700
+
+ Do not fail migration/backups when the storage provider returns ErrStorageProviderBusy
+
+ - Handle AWS and Portworx providers.
+ - If the storage provider returns a known "BUSY" error the driver wraps it
+ and returns it to the controller.
+ - If the controller sees this error, instead of marking the backup as failed it will retry
+ in the next reconcile loop.
+
+commit cc11e976bc0d0b43e969d08bb380a714c197bb7c
+Author: Aditya Dani
+Date: Thu Jun 3 16:22:16 2021 -0700
+
+ Update openstorage vendor to release-9.1
+
+commit f9d222a92ec86869ef5e31240ff0ba5f56fe0927
+Author: Ram
+Date: Thu Jul 1 18:28:23 2021 +0530
+
+ stor-419: store stork version in configmap
+
+ Signed-off-by: Ram
+
+commit e3fd22331e6d980232ec7afeaf4502507a702e7f
+Author: Ram
+Date: Thu Jul 1 16:21:43 2021 +0530
+
+ PB-1679: cluster-scope resource collection
+
+ Signed-off-by: Ram
+
+commit dcac8089359ae81fc40c4e9293454f4b4b5e777b
+Author: Rohit-PX
+Date: Wed May 26 01:25:54 2021 -0700
+
+ Integration test for creating reverse clusterpair using storkctl
+
+ Signed-off-by: Rohit-PX
+
+commit 3e2efa5ce47c5b7fd8817b3643cbeb3a989a2606
+Author: Ram
+Date: Fri Jun 25 18:37:53 2021 +0530
+
+ Allow setting k8s client api rate limiter
+
+ Signed-off-by: Ram
+
+commit 72271f246855c2421e84a75d8fdb3782cc2f3691
+Author: Prashanth Kumar
+Date: Fri Jun 25 04:45:36 2021 -0400
+
+ Backing up selected resources and all resources of a given namespace
+ - User can choose to back up all resources in one namespace and selected resources
+ in another namespace.
+
+ Signed-off-by: Prashanth Kumar
+
+commit d983afae5f3ddb731193b6648e156ce766ba94d1 (origin/master_stor-409)
+Author: Prashanth Kumar
+Date: Thu May 27 08:44:29 2021 -0400
+
+ Reading rule CR from kube-system ns when multiple ns are selected for backup
+
+ Signed-off-by: Prashanth Kumar
+
+commit 2e5808966ce91feecfa5558d477ea05f6526469c
+Author: Ram
+Date: Tue Jun 8 22:15:00 2021 +0530
+
+ stor-397: merge annotations for SA during restore/migration
+
+ Signed-off-by: Ram
+
+commit 9833cbac3ed5bd431bed21a8c976a3dd2f97dc72
+Author: Ram Suradkar
+Date: Tue Jun 8 14:03:46 2021 +0000
+
+ codegen auto-generated files
+
+ Signed-off-by: Ram Suradkar
+
+commit ce2b1f3a82bdb95838c967e8c6bc196551aa7eed
+Author: Ram Suradkar
+Date: Tue Jun 8 14:02:57 2021 +0000
+
+ stor-262: add user option to skip service resource updates
+
+ Signed-off-by: Ram Suradkar
+
+commit 4884d373602a85c5c92db097328ccdad95813f86
+Author: Ram
+Date: Mon Jun 21 17:04:01 2021 +0530
+
+ stor-411: prevent invalid cr updates
+
+ Signed-off-by: Ram
+
+commit 68cd7af7b795f0e092129386d2f0b4d366add5a2
+Author: Ram
+Date: Mon Jun 21 17:01:03 2021 +0530
+
+ Don't collect service accounts for users with no list permission in the namespace
+
+ Signed-off-by: Ram
+
+commit a57a188287caa8e4dcb6e4f7a2f3a49c4683ee94
+Author: Jim Ou
+Date: Mon Jun 14 16:26:14 2021 -0600
+
+ [STOR-383] add metrics for volume snapshot schedule
+
+commit 75fcb2c975ff858d83b4102cd022f75cb8b298d5
+Author: Prashanth Kumar
+Date: Fri Jun 4 07:56:41 2021 -0400
+
+ Backing up NetworkPolicy and PodDisruptionBudget objects
+ - NetworkPolicies with CIDR set are not backed up
+
+ Signed-off-by: Prashanth Kumar
+
+commit e04bd349f164fde6ca79eeb5f4d030e3168fe13a
+Author: Aditya Dani
+Date: Fri Jun 11 11:49:48 2021 -0700
+
+ Add a new annotation to skip PVCs from stork's scheduler scoring algorithm.
+
+ - Use the stork.libopenstorage.org/skip-scheduler-scoring: true annotation on PVCs
+ which should not be considered while scoring nodes in the scheduler's prioritize request.
+
+ Signed-off-by: Aditya Dani
+
+commit d41e52c3055717fd90828b16af13c1eb72de309d
+Author: Prashanth Kumar
+Date: Mon Jun 7 07:33:55 2021 -0400
+
+ Skipping backing up of gke-resource-quotas
+ - On GKE this gets created by default when a namespace is created,
+ so there is no need to back it up
+
+commit 41fd4bb8122fc9d9d2ffb730c9f34e6c158c5a51 (origin/Jim)
+Author: Ram
+Date: Mon Jun 7 14:06:40 2021 +0530
+
+ fix namespaced schedule policy cache
+
+ - use separate cache watch listener for the namespaced policy cache
+ store
+
+ Signed-off-by: Ram
+
+commit cf4837a92f72ef00a1ca2f647b79802175b80b8e
+Author: Ram
+Date: Mon May 24 20:12:36 2021 +0530
+
+ Update latest pvc specs while doing migration
+
+ Signed-off-by: Ram
+
+commit f16f61ba373c03aa930232c37954a82ee410a915
+Author: Jose Rivera
+Date: Fri May 28 19:24:15 2021 -0700
+
+ Yum has vulnerability issues, no need to install and use that. Just use microdnf.
+
+ Signed-off-by: Jose Rivera
+
+commit f93a58d5cb17e3ffe62cd13187cf39e2078c2018
+Author: Ram
+Date: Tue May 25 18:52:15 2021 +0530
+
+ Return err check while updating cloned namespace
+
+ - fix issue stor-391: application clone fails if dest namespace
+ already exists
+
+ Signed-off-by: Ram
+
+commit 6a04cfb8034630c87556664951a58910744f0335
+Author: Dinesh Israni
+Date: Sun May 2 18:27:48 2021 -0700
+
+ Update schedule package to use namespaced schedule policy
+
+ If the namespaced policy doesn't exist it looks for the cluster scoped
+ policy with the same name.
+ Also updated all controllers to pass in the namespace
+
+ Signed-off-by: Dinesh Israni
+
+commit 49d22d604a33f6fbfac260bd7224433561515c28
+Author: Dinesh Israni
+Date: Sat May 1 13:15:33 2021 -0700
+
+ Add CRD for namespace scoped schedule policy
+
+ Signed-off-by: Dinesh Israni
+
+commit f4ae1e890236a3355a5d475722baa020bccd9baa
+Author: Dinesh Israni
+Date: Sat May 1 13:15:04 2021 -0700
+
+ Fix code gen for crds
+
+ Signed-off-by: Dinesh Israni
+
+commit c7e2ddb6c30e3439cfb0670524939ce958cb0198
+Author: Dinesh Israni
+Date: Sat May 1 13:13:15 2021 -0700
+
+ Vendor update
+
+ Signed-off-by: Dinesh Israni
+
+commit 5a5403beeb5483b24f1501a02d9902ab80e465a3
+Author: Ram
+Date: Fri May 7 15:52:40 2021 +0530
+
+ Configure resync time for application controllers
+
+ - avoid sdk update if backup is already on final stage
+
+ Signed-off-by: Ram
+
+commit 23e047a46a877f3c4b02d87dce02e1ed24379eba
+Author: Rohit-PX
+Date: Wed Apr 28 17:34:44 2021 -0700
+
+ Add scaled cloudsnapshot test
+
+ Signed-off-by: Rohit-PX
+
+commit 6b98cc805c5fe13cb6363c75b9de9382bde49177
+Author: Rohit-PX
+Date: Thu Apr 15 11:52:28 2021 -0700
+
+ Update the ubuntu repo in travis yaml file
+
+ Signed-off-by: Rohit-PX
+
+commit 467fbbf7c0dfc19ba34e16c31ee83898ee32a7f2
+Author: Rohit-PX
+Date: Thu Apr 15 10:40:09 2021 -0700
+
+ Add permissions for stork-scheduler for k8s 1.21
+
+ Signed-off-by: Rohit-PX
+
+commit 362c554a2eed8df8599b65d7943264118fda0efc
+Author: Ram
+Date: Thu Apr 8 12:20:49 2021 +0530
+
+ Update grafana dashboard for changed metrics name
+
+ Signed-off-by: Ram
+
+commit c9a91c3eb342a719d4e1f470259a016795f51727
+Author: Rohit-PX
+Date: Mon Apr 5 18:10:25 2021 -0700
+
+ Vendor updates for torp and schedops
+
+ Signed-off-by: Rohit-PX
+
+commit 4ea4aa09fac700aa5d3b0d532a752919e15d5323
+Author: Ram
+Date: Thu Apr 1 23:10:40 2021 +0530
+
+ Add pluralizer rules for prometheus crd
+
+ Signed-off-by: Ram
+
+commit 30fe03cc1e3360d2448782da6b5f8d5e62077dd5
+Author: Ram
+Date: Tue Mar 30 22:09:48 2021 +0530
+
+ rename stork prometheus metrics with standard prefix
+
+ Signed-off-by: Ram
+
+commit 241d285656f649e5efd5c1c9dd6ae53a4640e221
+Author: Ram
+Date: Thu Mar 25 21:59:46 2021 +0530
+
+ Vendor updates for torpedo,sched-ops
+
+ Signed-off-by: Ram
+
+commit ecb036fcdacd49de18ce4643c21085a9289b3d91
+Author: Ram
+Date: Thu Mar 25 21:59:11 2021 +0530
+
+ Integration test for webhook allow dryRun options
+
+ Signed-off-by: Ram
+
+commit 0e759fe18fdca84ae875bb37f01f7cc65ad7162f
+Author: Rohit-PX
+Date: Mon Mar 22 14:42:14 2021 -0700
+
+ Clusterpair failure tests
+
+ Signed-off-by: Rohit-PX
+
+commit 84fd96942e7e580aca453d5c881edb19e5a574d4
+Author: Ram
+Date: Mon Mar 22 09:49:06 2021 +0530
+
+ Add appreg suspend support for CR's
+
+ - perconadb
+ - prometheus
+ - rabbitmq
+ - kafka (strimzi)
+ - postgres (acid.zalan.do)
+
+ Signed-off-by: Ram
+
+commit 34b4295cddfb3f50a4e90d2632d0e5d12bdc3870
+Author: Ram
+Date: Fri Mar 19 09:14:49 2021 +0530
+
+ prometheus alerts for stork metrics
+
+ Signed-off-by: Ram
+
+commit 309caf861575c27344a573a1199e8f4cd05ba437
+Author: Grant Griffiths
+Date: Tue Mar 2 20:21:56 2021 -0800
+
+ Fix API breaking changes and makefile
+
+ Signed-off-by: Grant Griffiths
+
+ Modify the UTs to adhere to the new k8s 1.20 client-go fake client.
+
+ Signed-off-by: Aditya Dani
+
+commit 5e50c870b93868dcb8c0bdc8ff355bcc8f5d22cd
+Author: Grant Griffiths
+Date: Tue Mar 2 20:21:32 2021 -0800
+
+ Go modules and k8s 1.20 client upgrade
+
+ Signed-off-by: Grant Griffiths
+
+commit 6a6555e31aed9a8d57c24ce46cc62334b402dd90
+Author: Ram
+Date: Thu Feb 18 23:31:58 2021 +0530
+
+ Detect newly registered crds and create appreg entries in stork
+
+ Signed-off-by: Ram
+
+commit 625843e0adaaf0d862f2d17a542922ac6fcfd4f5
+Author: Ram
+Date: Mon Feb 1 11:32:22 2021 +0530
+
+ stor-340: Redo cluster-pairing automatically
+
+ Signed-off-by: Ram
+
+commit 9161d8c105bf53a8320134e09706ee04c36de271
+Author: Rohit-PX
+Date: Thu Mar 18 12:38:06 2021 -0700
+
+ Add correct CLI option description for test-deploy
+
+ Signed-off-by: Rohit-PX
+
+commit 016d319198f857e6221bd6e3bd2131b726a86657
+Author: Ram
+Date: Mon Mar 15 16:54:19 2021 +0530
+
+ configure side-effect parameter for stork webhook
+
+ Signed-off-by: Ram
+
+commit f4dec0e2027d9715022845068f3a67a4c757cc7a
+Author: Rohit-PX
+Date: Wed Jan 6 22:41:27 2021 -0800
+
+ Add separate CLI params for test, source and dest kubeconfig
+
+ Signed-off-by: Rohit-PX
+
+commit 24958a4afe4340577f729d5fdfcd898769ed27e1
+Author: siva-portworx
+Date: Wed Mar 10 13:04:48 2021 -0500
+
+ Added logic to exclude the controlled resources from the GetResources API.
+
+ - Define a list for excluding controlled resources being returned
+ as part of the response of the resourceCollector GetResources API.
+ - For now added the configmap kube-root-ca.crt, which was getting created by
+ kube-controller-manager in every namespace.
+
+commit 23c03ff8b8e7093d8f786db933921c7cb3599908
+Author: Prashanth Kumar
+Date: Wed Mar 10 03:17:22 2021 -0500
+
+ Adding support for fetching resources based on requested resource types
+
+ Signed-off-by: Prashanth Kumar
+
+commit 3f9d85b6a2bdc01c6571ffb16cf87c895b9e27c2
+Author: Rohit-PX
+Date: Mon Mar 15 12:31:31 2021 -0700
+
+ Add sec annotations to create mig method, check for auth in tests
+
+ Signed-off-by: Rohit-PX
+
+commit a4a9149efb22a87292a9f268c5195004b6bf9234
+Author: siva-portworx
+Date: Thu Mar 11 05:13:08 2021 -0500
+
+ Updating clusterIps field of service to nil in prepareServiceResourceForCollection.
+
+commit e30034fe41997cf2c67a87842c9db4ad0d0aa606
+Author: Ram
+Date: Mon Jan 11 23:52:59 2021 +0530
+
+ vendor updates sched-ops
+
+ Signed-off-by: Ram
+
+commit 5237b72f534ed106ff7cb652304398f186ffa776
+Author: Ram
+Date: Mon Jan 11 23:52:34 2021 +0530
+
+ added UTs for migration/backup schedule metrics
+
+ Signed-off-by: Ram
+
+commit dcd3673d9fd0d25b7c0ac8786722ad59c6a63815
+Author: Ram
+Date: Thu Jan 7 13:29:23 2021 +0530
+
+ stor-331: add metrics support for backup/migration schedules
+
+ Signed-off-by: Ram
+
+commit c6aa72f7275a855380b9cddc448fd038ab22eb34
+Author: Ram
+Date: Tue Mar 2 23:39:45 2021 +0530
+
+ Use python3 version to install libs for google-sdk
+
+ Signed-off-by: Ram
+
+commit 810667298b0bd7e4622563cf5eea11d733ecf8e5
+Author: Ram
+Date: Fri Dec 18 23:57:08 2020 +0530
+
+ add grafana dashboard for migration and application controllers
+
+ Signed-off-by: Ram
+
+commit df631f75442a9bbbefb7808afee9b60f993eea1b
+Author: Ram
+Date: Mon Nov 30 23:27:57 2020 +0530
+
+ Add Readme for stork metrics setup instructions
+
+ Signed-off-by: Ram
+
+commit a83d476388327abb6cc156111baf5ef4f32c9493
+Author: Dinesh Israni
+Date: Fri Feb 12 07:29:57 2021 -0800
+
+ Get ApplicationRegistrations only once when preparing resources
+
+commit a97d89752b7417f34fdebb444f2807bc408c25cf
+Author: sivakumar subramani
+Date: Wed Feb 3 02:58:10 2021 -0500
+
+ Reduce the backupVolumeBatchCount value to 3 from 10, to avoid timeout failures
+ Added BACKUP-VOLUME-BATCH-COUNT env and made the default batch count 3
+
+commit c378eb4f0f0ad7e7417787e98837dff3cb9d143f (origin/2.6.0)
+Author: Dinesh Israni
+Date: Tue Jan 26 21:28:20 2021 -0800
+
+ storkctl: Check for error state of snapshot
+
+commit 771dbcf6355d16b2f0c515afdec96a892fb6df5c
+Author: Dinesh Israni
+Date: Tue Jan 26 21:27:59 2021 -0800
+
+ Vendor update for snapshotter
+
+commit 999b4fe57514ffebbd509426d8a0b793ca07fb41
+Author: Ram Suradkar
+Date: Mon Jan 4 16:06:06 2021 +0000
+
+ add support for replicaset backup/migration
+
+ Signed-off-by: Ram Suradkar
+
+commit b59f19a4fd701fc4fa849e1563de2d6a2594cc64
+Author: Ram Suradkar
+Date: Fri Jan 22 10:35:59 2021 +0000
+
+ Create pvc objects if volumeOnly migration is enabled
+
+ Signed-off-by: Ram Suradkar
+
+commit 7f434b6459066a997ba0ce112c782f4e8f058847
+Author: sivakumar subramani
+Date: Fri Jan 22 14:31:29 2021 -0500
+
+ Corrected the error handling while calling the CloudBackupCreate api.
+
+commit 62471255a1950f0e030d96dc3ab729202bb8ac79
+Author: Prashanth Kumar
+Date: Thu Jan 21 12:00:25 2021 -0500
+
+ [Portworx]: Setting backup size to zero on failure when fetching size
+
+commit e25223e916556a5d268a5f5fbe69e1f6c173ab3b
+Author: sivakumar subramani
+Date: Thu Jan 21 13:52:33 2021 -0500
+
+ Fixed the issue in updating volumeInfo when all volumes are successful.
+
+commit 13500a25d0594facc6040b825d0b8b7033a78387
+Author: Rohit-PX
+Date: Tue Jan 19 18:19:37 2021 -0800
+
+ Explicitly add linux as the GOOS
+
+ Signed-off-by: Rohit-PX
+
+commit eab938d15257c9c8e2c0a063107ac393fcb56692
+Author: sivakumar subramani
+Date: Tue Jan 12 11:04:50 2021 -0500
+
+ Passing namespace list in batches to resourceCollector.GetResources()
+
+commit e8f89f1bdccc1c40b2da55520e148f697922bbb8
+Author: Dinesh Israni
+Date: Sun Dec 20 14:17:31 2020 -0600
+
+ Use path from updated backup when checking for deletion
+
+commit 1248b13b365b56c43cd7c204dc12315ee471e0e6
+Author: Ram
+Date: Sun Dec 20 14:38:34 2020 +0530
+
+ vendor update apiextension fake client
+
+ Signed-off-by: Ram
+
+commit 44130387281274e46f0972dea9f313c05f3d626c
+Author: Ram
+Date: Sun Dec 20 13:49:17 2020 +0530
+
+ wait for crds to register while starting metrics collection
+
+ Signed-off-by: Ram
+
+commit e50f35b1d1d331969bfcc7e7c443e7fb66b3b3f0
+Author: Dinesh Israni
+Date: Fri Dec 18 20:14:24 2020 -0600
+
+ Don't check for namespace mapping again when finding objects to delete
+
+ Namespace is already being mapped to the destination
+
+commit 26f1105e639f60d0cabf9d6a601df507215ef3ef
+Author: Ram
+Date: Fri Dec 18 19:06:11 2020 +0530
+
+ retry v1 crd registration while doing app restore
+
+ - migration: err check crd reg before validating crds
+
+ Signed-off-by: Ram
+
+commit 9359272b8963f4011887d4e31935f7bbd6bc9cf6
+Author: Ram
+Date: Tue Dec 15 20:44:44 2020 +0530
+
+ vendor updates sched-ops
+
+ Signed-off-by: Ram
+
+commit e43523786e8d4eca46024fc974c5b4bf1687203c
+Author: Ram
+Date: Tue Dec 15 19:33:22 2020 +0530
+
+ Use apiextensionV1 api to register CRD objects
+
+ - store replica count for crd in annotation
+ - use replica count for crd to disable/enable CR resources
+ - apply crds as per apiversion for migration
+ - for v1beta1 to v1 conversion set x-kubernetes-preserve-field to true
+
+ Signed-off-by: Ram
+
+commit 65ef486daaec48740c2c1808c791401d64f42977
+Author: sivakumar subramani
+Date: Thu Dec 17 07:37:05 2020 -0500
+
+ Added retry logic while updating the ApplicationBackupStageApplications status to the CR.
+
+ If the CR update fails, we retry 10 times with a 10 sec delay.
+
+commit 86bb1aeffed41cc49eeb31290e96498c922c5947 (origin/master-stork-pb1006)
+Author: Grant Griffiths
+Date: Mon Dec 14 14:54:34 2020 -0800
+
+ [CSI] Add creation of default VolumeSnapshot classes
+
+ Signed-off-by: Grant Griffiths
+
+commit 2abeb00f8fda83faa410b949aefe060d440bd0d0 (origin/master-stor-319)
+Author: Ram
+Date: Tue Dec 15 12:27:59 2020 +0530
+
+ Misc fix - avoid unnecessary export of metrics variables
+
+ - addressed review comments
+
+ Signed-off-by: Ram
+
+commit f9aa45605f36483075bd1de7a8e8e141dd564630
+Author: Ram
+Date: Tue Dec 15 12:13:53 2020 +0530
+
+ UT's for stork prometheus metrics
+
+ Signed-off-by: Ram
+
+commit f5080570034c1cf10a622223ac050b8298e33602
+Author: Ram
+Date: Tue Dec 8 19:27:49 2020 +0530
+
+ vendor updates sched-ops
+
+ Signed-off-by: Ram
+
+commit 9e6f90c25474e3e0b5bd9ed95475e09572dc4c9c
+Author: Ram
+Date: Tue Dec 8 19:26:59 2020 +0530
+
+ prometheus metrics for stork controllers
+
+ - applicationbackup, restore and clone
+ - clusterpair, migrations
+
+ Signed-off-by: Ram
+
+commit 85fd554ba204a6122c287b1e6cf2e5e4ef0b7fe6
+Author: sivakumar subramani
+Date: Fri Nov 20 10:38:28 2020 -0500
+
+ Added retry logic for CloudBackupCreate failure.
+
+commit 057cf6eddca9f744e3cbe8d80c00aa7f860b1c0c
+Author: Grant Griffiths
+Date: Thu Dec 3 17:32:08 2020 -0800
+
+ [CSI] Remove cleanup checks for GetBackupStatus
+
+ Signed-off-by: Grant Griffiths
+
+commit fe97def95ced938b34f64b2335610ea263e13c04
+Author: Grant Griffiths
+Date: Thu Dec 3 23:11:40 2020 -0800
+
+ [CSI] Add CSI PV and PVCs back into restore resources
+
+ Signed-off-by: Grant Griffiths
+
+commit 6cc44cc09147c696e92c0f80f6810f2c73d02bb0
+Author: sivakumar subramani
+Date: Tue Dec 1 13:31:15 2020 -0500
+
+ Applying IncludeResource filter before deleting resources for the CSI driver
+
+commit 6ec732a6982020fa3463cae399b5b43d9369e0b0
+Author: sivakumar subramani
+Date: Mon Nov 30 08:39:58 2020 -0500
+
+ Avoid including non-csi volumes as part of csi's GetBackupStatus API.
+
+commit f4170587fb75644fd06a777efd8d48a3fb6ce40e
+Author: sivakumar subramani
+Date: Fri Nov 27 02:32:39 2020 -0500
+
+ Added retry logic for when the CR update fails for Inprogress status.
+
+ - Also return the VolumeInfo list in the case of failure in the GetBackupStatus
+ implementation in the csi driver.
+
+commit 17590efcd701adf12bc24ece11bf518143990859
+Author: Dinesh Israni
+Date: Tue Nov 24 22:34:41 2020 -0600
+
+ [CSI] Make the last part of the backup name unique
+
+commit 7ed4cb22e74b37ddcdf374fc5a36faf64bd9e349
+Author: Grant Griffiths
+Date: Thu Nov 19 22:53:20 2020 -0800
+
+ [CSI] Fix multi-namespace backups
+
+ Signed-off-by: Grant Griffiths
+
+commit ad3167ab2f03abd518d6b0462f485d0b1e74062c
+Author: Grant Griffiths
+Date: Tue Nov 17 10:26:29 2020 -0800
+
+ [CSI] Restore cleanup without backup obj and various fixes
+
+ - Restore cleanup and cancel no longer depend on the backup obj being
+ present.
+ - Use PVC size for backup/restore info sizes
+ - Fix VolumeSnapshot skip check to not look at group
+
+ Signed-off-by: Grant Griffiths
+
+commit 2bfd876b181f530f3ff989089b16622e06ab4a09
+Author: Grant Griffiths
+Date: Fri Nov 13 21:26:15 2020 -0800
+
+ [CSI] Add support for Replace Policy
+
+ Signed-off-by: Grant Griffiths
+
+commit fa169aa4df8846006cad9ee3c6563fe41e3b1190
+Author: Ram
+Date: Mon Oct 26 12:04:15 2020 +0530
+
+ vendor updates - sched-ops
+
+ Signed-off-by: Ram
+
+commit 184358e90a046cf81b0108adb5e4d97204befece
+Author: Ram
+Date: Mon Oct 26 12:02:40 2020 +0530
+
+ allow disabling cronjob support for migration
+
+ - add storkctl option to activate/deactivate cronjobs
+
+ Signed-off-by: Ram
+
+commit a8160b590191e06b3fa6d11f31055bc731719fd6
+Author: Grant Griffiths
+Date: Fri Nov 13 17:37:49 2020 -0800
+
+ [CSI] Namespace mapping, skip VS obj in includeObject, and UID fix
+
+ Signed-off-by: Grant Griffiths
+
+commit 26fa22e1220f34fc3e1732cf6c18a14ef84ac15a
+Author: Rohit-PX
+Date: Thu Nov 12 18:28:47 2020 -0800
+
+ Vendor changes for torpedo
+
+ Signed-off-by: Rohit-PX
+
+commit 5be37c9e6b59c5b7d9f8412fbb10ad471adadd48
+Author: Rohit-PX
+Date: Tue Nov 3 12:51:03 2020 -0800
+
+ Add new param for config map name for generic csi drivers
+
+ Signed-off-by: Rohit-PX
+
+commit e4cb462a5fffd36956c26bb8acb392aef2fe135c
+Author: Prashanth Kumar
+Date: Tue Oct 27 18:25:58 2020 +0000
+
+ Reporting volume size in bytes for EKS
+
+commit d8a220614e1111679d4ee074d1922ffc9d7aff7c
+Author: Dinesh Israni
+Date: Thu Nov 12 13:26:40 2020 -0800
+
+ Prune migrations even for PartialSuccess
+
+ Some resources might always fail to be migrated; this causes the status for all
+ PartialSuccess migrations to be saved. Only the last one needs to be kept in this
+ case
+
+commit b798b8a785754906efb3cda7fce64d3633c047b2
+Author: Grant Griffiths
+Date: Thu Nov 12 13:30:03 2020 -0800
+
+ [CSI] Fix CSI Backup to correctly clean up on failures
+
+ Signed-off-by: Grant Griffiths
+
+commit 60fe6a774b34b146ced0c42a658c1a7d89105c0c
+Author: Grant Griffiths
+Date: Tue Nov 10 17:25:58 2020 -0800
+
+ [CSI] Make VolumeSnapshot objects unique per backup request
+
+ Signed-off-by: Grant Griffiths
+
+commit 7f5bc87677c668cb7906d5492e7252ff92ef9d92
+Author: Dinesh Israni
+Date: Wed Nov 11 13:46:25 2020 -0800
+
+ Use client from resource collector when checking for PVC ownership
+
+ Without this, if the resource collector is called for a remote cluster it tries
+ to fetch objects from the local cluster
+
+commit 38d537860e07ba14bf1c033ad250a2f0c1cc46d7
+Author: Grant Griffiths
+Date: Tue Nov 10 03:05:30 2020 -0800
+
+ [CSI] Add check to make sure VolumeSnapshot and VSC are cleaned up
+
+ Signed-off-by: Grant Griffiths
+
+commit 52f8f1ff2bce1fec83e3701b21dd2ef062ea1248
+Author: Grant Griffiths
+Date: Tue Nov 10 01:49:57 2020 -0800
+
+ [CSI] Only remove bind-completed and bound-by-controller annotations
+
+ - On CSI restore, we used to remove all kubernetes.io annotations
+ - We must only remove bind-completed and bound-by-controller as there
+ are other valid kubernetes.io annotations
+
+ Signed-off-by: Grant Griffiths
+
+commit 302d84567bc648555cb65d148effe35ed27b49a0
+Author: sivakumar subramani
+Date: Wed Oct 28 23:22:57 2020 -0400
+
+ Added log level change step in the SIGUSR1 signal handler of the dbg pkg.
+
+commit 263a748f2e2020410cea6875f43c276bff29fe7b
+Author: Grant Griffiths
+Date: Wed Oct 28 17:19:34 2020 -0700
+
+ Don't skip VolumeName update on CSI driver check failure
+
+ Signed-off-by: Grant Griffiths
+
+commit 45b8e5bb284ba103c7d02e3276a47f50d899b07d
+Author: Dinesh Israni
+Date: Tue Oct 27 10:24:56 2020 -0700
+
+ Don't collect cluster scoped resources from ApplicationRegistration
+
+commit ad607f0fe8d64def47ba3e284cca45fd99c9e387
+Author: Dinesh Israni
+Date: Tue Oct 27 10:14:39 2020 -0700
+
+ Fetch ApplicationRegistration only once when preparing resources
+
+commit 4df3ef56e90025b567e911d22989487a28dcb7e5 (origin/stor-292)
+Author: Rohit-PX
+Date: Thu Sep 10 18:53:31 2020 -0700
+
+ Add security annotations to CRDs for auth-runs
+
+ Signed-off-by: Rohit-PX
+
+commit acbcc4d6f412159be248d8dbc3d0d7a36f98743a
+Author: Grant Griffiths
+Date: Thu Oct 15 13:03:25 2020 -0700
+
+ [CSI] Add restoreInfo for initial restore and during status check
+
+ Signed-off-by: Grant Griffiths
+
+commit 82b35eade174bac1ef6d9f9e5e814bab519bb35e
+Author: Ram
+Date: Thu Sep 24 23:24:39 2020 +0530
+
+ add UT's for stork extender and health monitor metrics
+
+ Signed-off-by: Ram
+
+commit 96c79ea053c8011fd24b352c865133caa7e6da9d
+Author: Ram
+Date: Thu Sep 24 23:22:44 2020 +0530
+
+ re-org metrics constants to respective pkgs
+
+ Signed-off-by: Ram
+
+commit 947a6d8f23c106585647f253014345bb3583a0c9
+Author: Ram
+Date: Thu Sep 24 23:21:36 2020 +0530
+
+ vendor updates prom testutil pkg
+
+ Signed-off-by: Ram
+
+commit ec5afdb15404076e3a146dfd53157ae4c6f8a054
+Author: Ram
+Date: Wed Sep 2 23:39:40 2020 +0530
+
+ Specs to enable prometheus metrics for stork
+
+ Signed-off-by: Ram
+
+commit 453be91054eff710cb29af4fb1ee42fc0e3262ed
+Author: Ram
+Date: Wed Sep 2 23:37:47 2020 +0530
+
+ Prometheus metrics for stork extender and monitor
+
+ - added prometheus metrics for stork extenders, which
+ cover the hyper-, non- and semi-hyperconverged pod counters
+ - added stork monitor metrics for the number of pods rescheduled
+
+commit 7428dcbdbde0aae8fdc94f671a9f32ad6c14648c
+Author: Grant Griffiths
+Date: Tue Oct 13 10:18:47 2020 -0700
+
+    Fix for non-CSI PVC restore not erroring out
+
+    Signed-off-by: Grant Griffiths
+
+commit 9b16a998e32b64c72efdf40cf7d8c9b8411894c8
+Author: sivakumar subramani
+Date: Sat Aug 29 11:39:56 2020 -0400
+
+    Submitting the volume StartBackup in a batch count of ten.
+
+    - This way, we will update the volume backup status
+    in the CR content frequently.
+
+commit 6254ad1390988ae2f183ec8f1e85d8cb071c3761
+Author: Grant Griffiths
+Date: Mon Jul 20 22:13:20 2020 -0700
+
+    Add CSI driver snapshot implementation
+
+    Signed-off-by: Grant Griffiths
+
+commit aa955bb559b2befc6bbbe17881bcdad09545c1cc
+Author: Grant Griffiths
+Date: Mon Jul 20 22:12:25 2020 -0700
+
+    Add external-snapshotter to vendor for snapshotter client
+
+    Signed-off-by: Grant Griffiths
+
+commit eb6c180439ad1136182e9b3f1477a63ca6bc9e0e
+Author: Ram
+Date: Thu Sep 10 21:53:37 2020 +0530
+
+    vendor updates for openstorage-7.0
+
+    Signed-off-by: Ram
+
+commit 717c7e9e2deaa2ecdad7d57b2428fe5521ba3497
+Author: Ram
+Date: Thu Sep 10 21:51:59 2020 +0530
+
+    [portworx] Allow passing delete local snapshot flag to CloudSnapCreate api
+
+    Signed-off-by: Ram
+
+commit 372aaa37b267008f09f08dd605bed192de5d20cd
+Author: Ram
+Date: Thu Sep 3 12:56:30 2020 +0530
+
+    Check for clusterdomains status only if sync-dr is configured
+
+    Signed-off-by: Ram
+
+commit 84e4083dc39bf40b6c99f786240725bdfc908cdb
+Author: Ram
+Date: Wed Sep 16 10:46:38 2020 +0530
+
+    Update go version to 1.13.1 for integration test container
+
+    Signed-off-by: Ram
+
+commit 7ac0941b63a97e00c4be777f7b0e0b116975e00c
+Author: sivakumar subramani
+Date: Thu Sep 10 05:56:34 2020 -0400
+
+    Applying label selector for clusterRole, clusterRoleBinding and ServiceAccount resources.
+
+commit 1c2360c11cfe5029d00884c4cded3483b432e2d6
+Author: Ram
+Date: Mon Sep 14 23:09:53 2020 +0530
+
+    UT's to test suspend/resume of multiple migrSchedules
+
+    Signed-off-by: Ram
+
+commit 17295a28a23695781b028b88b2db21459d295001
+Author: Ram
+Date: Mon Sep 14 22:51:35 2020 +0530
+
+    Fix multiple suspend/resume migrSchedule issue
+
+    Signed-off-by: Ram
+
+commit 26af754f4f441db6582f143757d5a89386a1c83c
+Author: Ram
+Date: Fri Sep 11 22:16:52 2020 +0530
+
+    Wait for resource deletion before re-creating during migration
+
+    Signed-off-by: Ram
+
+commit 76504464515e9c9f95c59d8d4897e9baeaf9ad5c
+Author: Dinesh Israni
+Date: Fri Sep 11 00:12:21 2020 -0500
+
+    Check for permission error when processing objects in resource collector
+
+commit a168522973f7d76fdb6b84b66ab86350e4b660a1 (origin/master-stork810)
+Author: Luis Pabón
+Date: Wed Sep 9 16:09:40 2020 -0400
+
+    Revert "Add authentication labels to volume"
+
+    This reverts commit ab2ab4a00d358f1302ea9ba548f1b4866c6d86c9.
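Commit 9b16a998 above batches the StartBackup submissions. A small sketch of the idea, with the start/persist callbacks standing in for the driver call and the CR status write:

    const batchSize = 10 // the "batch count of ten" from the commit message

    // startInBatches kicks off volume backups ten at a time and persists the
    // CR status after every batch, so progress is recorded frequently.
    func startInBatches(volumes []string, start func(string) error, persist func() error) error {
        for i := 0; i < len(volumes); i += batchSize {
            end := i + batchSize
            if end > len(volumes) {
                end = len(volumes)
            }
            for _, vol := range volumes[i:end] {
                if err := start(vol); err != nil {
                    return err
                }
            }
            if err := persist(); err != nil {
                return err
            }
        }
        return nil
    }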
+
+commit ab2ab4a00d358f1302ea9ba548f1b4866c6d86c9
+Author: Luis Pabón
+Date: Tue Sep 8 21:00:39 2020 -0700
+
+    Add authentication labels to volume
+
+    If a request is authenticated, then the resulting volumes must have
+    the labels pointing to the authentication information
+
+    Signed-off-by: Luis Pabón
+
+commit 84fb515ef5275afc9a9453065c4a1405558a2a7a
+Author: Dinesh Israni
+Date: Tue Sep 8 12:18:15 2020 -0500
+
+    Skip types in resource collector for forbidden errors
+
+commit 849b951f6a1ea3efee12193d6f2f2ebdc26f0374
+Author: sivakumar subramani
+Date: Wed Sep 2 09:08:07 2020 -0400
+
+    Continuing with next namespace creation if current ns already exists.
+
+commit 8b66f7b32f20e7c102ac0e7a6487d2d8f4b3d331
+Author: Rohit-PX
+Date: Mon Aug 31 20:05:54 2020 -0700
+
+    Vendor updates for torpedo/auto-pilot/apimachinery
+
+    Signed-off-by: Rohit-PX
+
+commit cb114b6b67e5c75ac70e5fd9ad4758a44fdf707e
+Author: Rohit-PX
+Date: Tue Jul 21 22:49:48 2020 -0700
+
+    Use objectstore drivers to validate deletion of app backups
+
+    Signed-off-by: Rohit-PX
+
+commit c0553effb7673a45073005fa6c5e3e9e485479f0
+Author: Dinesh Israni
+Date: Tue Sep 1 10:05:13 2020 -0700
+
+    Set Group to core if empty when creating object map
+
+commit 373647b437fb7d9d6f10134ca598b1bd3f044233 (origin/master-stork-263)
+Author: Dinesh Israni
+Date: Tue Aug 25 16:36:20 2020 -0700
+
+    Add support to specify resources during application backup
+
+commit 915046a60cd113e02af7c8cf8236ceaa4d2928a4
+Author: Ram
+Date: Fri Aug 21 17:52:59 2020 +0530
+
+    vendor updates sched-ops
+
+    Signed-off-by: Ram
+
+commit ea687f06194a5cb8bb63f542fac4c681a4c40ce8
+Author: Ram
+Date: Mon Aug 24 22:59:21 2020 +0530
+
+    Add debug logs to webhook server
+
+    Signed-off-by: Ram
+
+commit b773b813de577ab22cddf682ef44a90f9a063af7
+Author: Ram
+Date: Fri Aug 21 17:51:15 2020 +0530
+
+    Register all crds found at startup
+
+    - create applicationregistration cr for all crds
+    present at k8s server
+
+    Signed-off-by: Ram
+
+commit 04eb8c25862aa40fb532b0c795d9b529cebfca2d
+Author: Ram
+Date: Fri Aug 21 10:43:31 2020 +0530
+
+    [Portworx] Restrict backuplocation for migration in px < 2.6
+
+    Signed-off-by: Ram
+
+commit f1504ba8bf6771ed732ed3aee3c84540ad762763
+Author: Ram
+Date: Thu Aug 20 00:00:09 2020 +0530
+
+    create backuplocation on destination cluster if passed as pair option
+
+    Signed-off-by: Ram
+
+commit 931f76d1dd64c38ffa11a6fa6ed9ae2e31775f2c
+Author: Ram
+Date: Wed Aug 19 23:58:59 2020 +0530
+
+    [portworx] pass in backuplocation as credid to clusterpair create api
+    - use IoProfileBkupSrc for volume restore
+    Signed-off-by: Ram
+
+commit 267a2d45c45a8035284a0a32e1030da4a5ec23a3
+Author: Ram
+Date: Wed Aug 19 23:58:16 2020 +0530
+
+    vendor updates - libopenstorage/openstorage
+
+    Signed-off-by: Ram
+
+commit 6c183fbe824476cb552c9b617f1378036b35409f (tag: v2.5.0-rc1)
+Author: Prashanth Kumar
+Date: Mon Aug 17 07:48:49 2020 +0000
+
+    Adding actual backup size in backup volume info
+
+commit 026b75e319a6ce62a37f0c5e1a3a534ef771f5d0
+Author: Prashanth Kumar
+Date: Sun Aug 16 03:57:16 2020 +0000
+
+    [Azure]: Fix to populate backup size for azure volumes
+
+commit 486be964eb459ef0f861705f38f512c092f1ed08
+Author: Prashanth Kumar
+Date: Thu Aug 13 16:46:56 2020 +0000
+
+    Passing empty endpoint to AWS objectstore operations
+
+    AWS SDK fetches correct endpoint based on provided region
+
+commit 9137cf544fca3fd772f9c13ee6361ae231dcac19
+Author: siva-portworx
+Date: Mon Aug 10 12:29:00 2020 +0000
+
+    Added check for NotFound error in GetRestoreStatus of all drivers.
+
+    - In all driver modules except portworx, add a check in get volume
+    status during restore to mark the restore of the volume if we get a
+    NotFound error while querying volume status.
+
+commit 843d3b6f920cdbe545438b4e22fb5a7573d16d4e
+Author: Prashanth Kumar
+Date: Mon Aug 17 18:21:02 2020 +0000
+
+    Computing total backup size before uploading metadata.json file
+
+commit 1dac4379fc8e18e09500b98f59368c7f0d93c786
+Author: Dinesh Israni
+Date: Thu Aug 13 20:04:35 2020 -0700
+
+    Don't do namespace check in ApplicationBackup if it is completed
+
+commit db9a159795aaa698138d685c4dc27b92aeaa69ff
+Author: Dinesh Israni
+Date: Thu Aug 13 20:04:16 2020 -0700
+
+    Only create namespaces that are being restored
+
+commit c1f4dba5f8ca2b034d56b9b235e11d7b0903277f
+Author: Dinesh Israni
+Date: Thu Aug 13 19:52:34 2020 -0700
+
+    Use core as group when checking for PVC to restore
+
+commit 55a355495cb2098d9961ad552246d51580b3ddb2
+Author: Dinesh Israni
+Date: Mon Aug 10 21:02:55 2020 -0700
+
+    ResourceCollector: When checking for resources set group name for core
+
+    The group in the resource is empty; it needs to be set to core before comparing
+
+commit 890250ca53187c614635e6b9d0fb25605a3cf809
+Author: Dinesh Israni
+Date: Thu Aug 6 16:30:32 2020 -0700
+
+    Update Rule spec to take in container name
+
+    This is required since a pod could have multiple containers and we
+    need to know which one we should run the commands in
+
+commit ddc1313ace02d7bf3d273d8133af5211f9382af2
+Author: Dinesh Israni
+Date: Thu Aug 6 16:29:58 2020 -0700
+
+    Add support to pass in * to select all namespaces to backup
+
+commit 20d6600800dc043293219c82edf30a3a39c5f74f
+Author: Harsh Desai
+Date: Sat Aug 8 15:32:26 2020 -0700
+
+    Update copyright to 2020
+
+commit 03645313f300e57d269d019aa9aef1ebd47090dd
+Author: Dinesh Israni
+Date: Thu Aug 6 22:25:51 2020 -0700
+
+    Add support to collect LimitRange for Migration/ApplicationBackup/Clone
+
+commit 38a3d0e88f11c1957056cd9266a745fedccd08b9
+Author: Ram
+Date: Tue Aug 4 20:56:43 2020 +0530
+
+    allow string type for appreg suspend option
+
+    Signed-off-by: Ram
+
+commit c560870c6513cf0ae7c010785a3fbc3328929fd1
+Author: Ram
+Date: Thu Jul 30 21:03:42 2020 +0530
+
+    vendor updates sched-ops
+
+    Signed-off-by: Ram
+
+commit c1f5b284fd6001cd2185b8eb1ff407d2e4fe698c
+Author: Ram
+Date: Mon Jul 13 16:59:46 2020 +0530
+
+    storkctl ux enhancement
+    - accept storage option for clusterpair
+    - add option to allow validating migration spec
+    - add watch option to storkctl stork resources
+    - fix error checks
+
+    Signed-off-by: Ram Suradkar
+
+commit 6bd079c2dbdfcfa6775cf14808a6dfe1b6626720
+Author: Grant Griffiths
+Date: Thu Jul 30 10:27:44 2020 -0700
+
+    [Portworx] Move jwtIssuer to be configurable env var
+
+    Signed-off-by: Grant Griffiths
+
+commit f8508819999ae828a7297efe5e39a9e738e41c4f
+Author: Grant Griffiths
+Date: Tue Jul 28 14:04:56 2020 -0700
+
+    [Portworx] Add PX version check for jwt issuer
+
+    Signed-off-by: Grant Griffiths
+
+commit c885fcc517746e19e09c2be216517a18532093a9
+Author: Rohit-PX
+Date: Wed Jul 22 17:40:18 2020 -0700
+
+    Create new bucket for stork integration tests
+
+    Signed-off-by: Rohit-PX
+
+commit a13cf62cb58a033cae5451062f0070a046ea72bc
+Author: Dinesh Israni
+Date: Thu Jul 23 09:51:32 2020 -0700
+
+    Vendor update for sched-ops
+
+commit 2913e9a4dbbd11c86054f60c20f25e4fbdab38b5
+Author: Ram
+Date: Thu Jul 23 22:09:56 2020 +0530
+
+    ignore if appregistration cr is not present on cluster
+
+    Signed-off-by: Ram
+
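Commits c0553eff and 55a35549 above both deal with the empty API group on legacy core objects. A minimal sketch of the normalization they describe, using the apimachinery GroupVersionKind type:

    import "k8s.io/apimachinery/pkg/runtime/schema"

    // normalizeGroup gives legacy core-API objects (PVCs, Services, ...) an
    // explicit "core" group so object-map lookups and comparisons agree.
    func normalizeGroup(gvk schema.GroupVersionKind) schema.GroupVersionKind {
        if gvk.Group == "" {
            gvk.Group = "core"
        }
        return gvk
    }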
+commit 79beb7b368f2cbdde72f29a95b956b4794363824
+Author: siva-portworx
+Date: Mon Jul 20 13:09:10 2020 +0000
+
+    Added fix to not wait for the re-created resources in DeleteResources API.
+
+commit d71a72639040a642c645d35efea87af41d2b9f47
+Author: Ram
+Date: Thu Jul 23 17:30:56 2020 +0530
+
+    use resourcecollector k8sclient instead of schedop client to list
+    crd resources
+
+    Signed-off-by: Ram
+
+commit d0f437ecf0bc0ab34c81bf189c410e9807657912
+Author: Ram
+Date: Thu Jul 23 12:58:51 2020 +0530
+
+    vendor updates torpedo,schedops
+
+    Signed-off-by: Ram
+
+commit 9894cc3a0c74ff05b6e50f0a67facc86a21b55cf
+Author: Ram
+Date: Thu Jul 16 21:55:05 2020 +0530
+
+    update sched-ops createNamespace api
+    - address review comments
+
+    Signed-off-by: Ram
+
+commit 156ab1456043087e55591a8ea23bab390a9cfeb9
+Author: Ram
+Date: Wed Jul 8 23:43:17 2020 +0530
+
+    replace namespace metadata if namespace already exists
+
+    Signed-off-by: Ram
+
+commit 6e7c4ceea12ac211d7de9725f3927f9c1127b134
+Author: Ram
+Date: Wed Jul 8 00:36:03 2020 +0530
+
+    keep namespace metadata for application backup,restore & clone
+
+    Signed-off-by: Ram
+
+commit db41af12556938bb6801dba12618b2e7b6b9d131
+Author: Dinesh Israni
+Date: Tue Jul 21 17:11:42 2020 -0700
+
+    Add some missing storage classes in integration tests
+
+commit 8a3223c18abdc4ef69acead06180bb04e691b96c
+Author: Dinesh Israni
+Date: Tue Jul 21 16:50:02 2020 -0700
+
+    Ignore empty pre and post exec rules
+
+commit b59a8963fb5722a45f6df5ca93653d768cec1655
+Author: Ram
+Date: Mon Jul 20 19:54:33 2020 +0530
+
+    delete crd.json on applicationbackup delete retention
+
+    Signed-off-by: Ram
+
+commit 4f725e7283e1795b7f001dd339b2b511f7945859
+Author: Ram
+Date: Wed Jul 8 00:36:58 2020 +0530
+
+    handle pre/post exec rule for volumesnapshotschedules
+
+    Signed-off-by: Ram
+
+commit 61017a7f1e81ca5f64dee22fddbb54a3e2a6bae0
+Author: Rohit-PX
+Date: Thu Jul 16 14:40:08 2020 -0700
+
+    Fix expected backup count for scheduled interval backup tests
+
+    Signed-off-by: Rohit-PX
+
+commit 46e9a68ba9551362f28ced6692fdd0424a013c8a
+Author: Prashanth Kumar
+Date: Fri Jul 17 13:55:40 2020 +0000
+
+    [Portworx]: Fix updating of volume info for backup size on unsupported portworx version
+
+commit 71f3174023222e4f654f2758e821d9db338eaf49
+Author: Dinesh Israni
+Date: Mon Jul 13 19:35:37 2020 -0700
+
+    Collect ResourceQuota objects from ResourceCollector
+
+commit 014b4a24bd86461a775f9b4af7a91f893f5998cd
+Author: Dinesh Israni
+Date: Mon Jul 13 19:34:55 2020 -0700
+
+    Update namespace in Role during apply in resource collector
+
+commit 3e71515998dc0aa678e02cd63fb3db0a08714132
+Author: Christoph Böhmwalder
+Date: Fri Jul 3 10:19:15 2020 +0200
+
+    [LINSTOR] add volume driver for LINSTOR
+
+    Add a new volume driver for LINSTOR, an SDS solution by LINBIT. Also
+    make some changes in order to be able to run the integration tests.
+
+commit 20fb0c4133da36eadd1f0ae38096fb1d42f1d676
+Author: Christoph Böhmwalder
+Date: Fri Jul 3 10:18:13 2020 +0200
+
+    vendor changes for LINSTOR driver
+
+    Update dependencies and add golinstor
+
+commit eeab301f7ded43f74a500199ef82f0a427f8fdc4
+Author: Dinesh Israni
+Date: Mon Jul 6 21:17:18 2020 -0700
+
+    Skip downloading CRDs in ApplicationRestore if not uploaded
+
+commit d388b7b85e2b8e2897e27804ed009fedc299114d
+Author: Ram
+Date: Tue Jul 7 21:47:28 2020 +0530
+
+    disable webhook controller by default
+
+    Signed-off-by: Ram
+
+commit cb6d155a6c9132654005bb331263b915d3cc56fb
+Author: Dinesh Israni
+Date: Mon Jul 6 20:14:03 2020 -0700
+
+    Skip backup of PVCs being deleted or in Pending state
+
+commit 42acd34471b2493040acb6dede0e8dc60bb2ab2d
+Author: Ram Suradkar
+Date: Thu Jul 2 11:50:49 2020 +0000
+
+    fix restore fail issue by removing unnecessary metadata fields
+    - change app reg names
+    - correct typo for couchbase appregistration
+
+    Signed-off-by: Ram Suradkar
+
+commit 176e632c9d392e7e255ef5616de41d48ea1ab78e
+Author: Dinesh Israni
+Date: Wed Jul 1 15:58:28 2020 -0700
+
+    [Portworx] Init driver from InspectVolume if required
+
+commit bc1dbc43d369c21abb1340db5e0c422081bc282b
+Author: Rohit-PX
+Date: Mon Jun 29 10:06:22 2020 -0700
+
+    Separate application backups and restores for all tests
+
+    Signed-off-by: Rohit-PX
+
+commit 39f960a75807bcf2f05c70157f231fb6399b9aa9
+Author: Ram
+Date: Wed Jul 1 18:02:22 2020 +0530
+
+    allow activate/deactivate for migrated crds
+
+    Signed-off-by: Ram
+
+commit f2d0fb072a1ae6257d343b95a76140db766a819b
+Author: Ram
+Date: Wed Jul 1 11:55:43 2020 +0530
+
+    Migrate resource specific crds only
+    - fix review comments
+    - use struct for registering app resources rather than csv parsing
+    - use group/version/kind while collecting resources
+    Signed-off-by: Ram
+
+commit bb59ff5a4a461862d4e4fc6281cb883fcfc70ed9
+Author: Ram
+Date: Tue Jun 30 22:47:51 2020 +0530
+
+    add uts for appreg cli
+
+    Signed-off-by: Ram
+
+commit a49c641ebbf30486a0b336113fccc9bb70a08798
+Author: Ram
+Date: Tue Jun 30 20:54:21 2020 +0530
+
+    skip migrating crd if app not registered
+    - fix duplicate app registration
+
+    Signed-off-by: Ram
+
+commit be71d0de69a12e192db27f692887b4273338e149
+Author: Ram Suradkar
+Date: Tue Jun 30 06:16:10 2020 +0000
+
+    vendor updates for sched-ops
+
+    Signed-off-by: Ram Suradkar
+
+commit 8464c12f8848a670a60f94444aba6a428f917864
+Author: Ram
+Date: Tue Jun 30 11:11:14 2020 +0530
+
+    Create app registration for already supported CRD's
+    - add storkctl option for retrieving app reg
+
+    Signed-off-by: Ram
+
+commit e1fc97ab90374bab47c55709f7a37b1b92edb832
+Author: Ram
+Date: Fri Jun 19 23:44:43 2020 +0530
+
+    Support generic CRD migration,backup and restore
+
+    Signed-off-by: Ram
+
+commit 6d7585c1c220ad3823f60a598cf8a6ba745a1ff6
+Author: Prashanth Kumar
+Date: Tue Jun 30 08:54:27 2020 +0000
+
+    Adding volume size to restore CR
+
+commit 82133ed72e6bfb9d3a9ca2a02f70a810ab4fcfa6
+Author: Prashanth Kumar
+Date: Tue Jun 30 08:52:14 2020 +0000
+
+    [Portworx]: Driver changes for the following
+    - Returning backup size zero for older portworx driver which doesn't
+    support OSD Size() call
+    - Adding size to restore volumes
+
+commit 9255e7a9877535484b77a185b505b514ca6800df
+Author: Dinesh Israni
+Date: Mon Jun 15 16:00:10 2020 -0700
+
+    Add support for restoring specific objects
+
+    Users can specify individual objects to restore by specifying them under
+    spec.includeResources in ApplicationRestore
+    By default all objects from the backup will be restored
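Commit 9255e7a9 above introduces the spec.includeResources filtering. A sketch of the selection logic it describes; the resourceRef shape is illustrative, not stork's exact API type:

    // resourceRef mirrors the shape of an entry under spec.includeResources.
    type resourceRef struct {
        Kind, Namespace, Name string
    }

    // shouldRestore keeps the default of restoring everything when no filter
    // is set, and otherwise restores only the listed objects.
    func shouldRestore(include []resourceRef, kind, ns, name string) bool {
        if len(include) == 0 {
            return true
        }
        for _, r := range include {
            if r.Kind == kind && r.Namespace == ns && r.Name == name {
                return true
            }
        }
        return false
    }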
+
+commit 5dfb8543bfb4e735e7de344fa05ea6e0fa6bfdec
+Author: Ram
+Date: Fri Jun 26 22:14:29 2020 +0530
+
+    Add applicationregistration CR to register custom CRD for backup, migration
+
+    Signed-off-by: Ram
+
+commit b89dee5cd5334dcabafca9aec4d6ccc1cb13c4f2
+Author: siva-portworx
+Date: Mon Jun 22 11:03:42 2020 +0000
+
+    corrected the typo in the failure reason.
+
+commit 04e8e89d92eae87d386e2683aaaea9edda83d0d8 (origin/master_error_fix)
+Author: siva-portworx
+Date: Thu Jun 18 19:49:33 2020 +0000
+
+    Removed CR updates that lead to the "object has been modified" error.
+
+    - In backupVolumes() and backupResources(), remove the
+    CR updates that can lead to the "object has been modified" error
+    as two updates happen in the same cycle of the reconciler.
+
+commit 32d39271602b0ec7a37512bb3dda4e2eadcc88fa
+Author: Dmytro Tsapko
+Date: Tue Jun 9 05:10:50 2020 +0300
+
+    [portworx] authorization for cluster manager calls is added
+
+    Signed-off-by: Dmytro Tsapko
+
+commit 56967162e4049d4970919842d0e897bedee754d0
+Author: stgleb
+Date: Thu Jun 18 01:50:21 2020 +0300
+
+    STOR-200 Add prepare and verify pods for mysql
+
+commit a2bc3899256069f7e08df2cd251064f8ca5b9946
+Author: Prashanth Kumar
+Date: Sun Jun 14 17:01:26 2020 +0000
+
+    Added fetching backup size of volume backup
+
+commit 7ec19c88322e891772b01d7a7357efb2998fedc8
+Author: Prashanth Kumar
+Date: Fri Jun 12 15:01:58 2020 +0000
+
+    vendor update for libopenstorage to support cloud backup size proto
+
+commit 55bd790a06d16799e9e37f2a454a2d828d4c60de
+Author: Ram
+Date: Thu Jun 18 19:58:25 2020 +0530
+
+    Disable auto-updating app scheduler as stork for app,
+    if the disable annotation is applied
+
+    Signed-off-by: Ram
+
+commit ca65f5006f4241a145d95c55803e3971c963e550
+Author: Dinesh Israni
+Date: Mon Jun 15 18:22:46 2020 -0700
+
+    Add events when deleting pods from health monitor
+
+commit 9660efb6ea32c5e63d87d31de1c2992a3fa3a3f8
+Author: Dinesh Israni
+Date: Mon Jun 15 18:20:42 2020 -0700
+
+    [Portworx] Map node status None to Online
+
+    During node start the status for all the nodes is set to None; this shouldn't
+    cause pod deletions from the health monitor
+
+commit 8b4b217b0ceb771d681230e92a4edf97036bb950
+Author: Rohit-PX
+Date: Fri Jun 5 15:43:25 2020 -0700
+
+    Integration test to check health check fix
+
+    Signed-off-by: Rohit-PX
+
+commit 2bfd7160ebc5341903f0fab2b939184479b62c1e
+Author: Dinesh Israni
+Date: Mon Jun 8 16:05:30 2020 -0700
+
+    Add support to collect couchbase CRs
+
+    Will be used during Migration, ApplicationClone and ApplicationBackup
+
+commit 23f558043bfbb8f3d8843bd417a4d7114f244ee7
+Author: Dinesh Israni
+Date: Thu Jun 4 18:11:22 2020 -0700
+
+    Fix storkctl error message when suspending backup schedule
+
+commit 02a8ddb2603c1719dc5a1a4b09d24ab59ca52941
+Author: Dinesh Israni
+Date: Thu Jun 4 16:37:54 2020 -0700
+
+    Fix elapsed time for application backup when it hasn't started
+
+commit 1636003bf7169c4227a86dddfa74c7d7466caa6e
+Author: Dinesh Israni
+Date: Thu Jun 4 16:37:42 2020 -0700
+
+    Bump version to 2.5.0
+
+commit 152a07be564fafa79fd4de3fa6ba665650155f5f
+Author: Dinesh Israni
+Date: Thu Jun 4 14:38:34 2020 -0700
+
+    [Portworx] Add options to pass in incremental count frequency
+
+commit 0e091fb57413fbc5d52a9e127a91a8436c0e90e3
+Author: ram-infrac <35250819+ram-infrac@users.noreply.github.com>
+Date: Fri Jun 5 10:24:42 2020 +0530
+
+    Update cmd/stork/stork.go
+
+    Co-authored-by: Dinesh Israni
+
+commit ff9aab6c2e23f47d29b6d3b2f624db080fe88c7f
+Author: Ram
+Date: Fri Jun 5 09:55:45 2020 +0530
+
+    add flag to enable/disable webhook controller
+
+    Signed-off-by: Ram
+
+commit cd05e53933c96d8de63aeaf5d10d7c3c9fbc4427
+Author: Dinesh Israni
+Date: Thu Jun 4 14:40:56 2020 -0700
+
+    Add storageclass to s3 config in BackupLocation
+
+commit 302dab2f82005aff31268d17a32dfd3fc8de4ca7
+Author: Dinesh Israni
+Date: Thu Jun 4 14:39:41 2020 -0700
+
+    Add -q flag to wget in Dockerfile
+
+commit fdc70d4eb5a52bf61f2837b99c015c5e5c644661
+Author: Ram
+Date: Thu Jun 4 19:40:10 2020 +0530
+
+    use admission review request namespace to get volume info
+    - don't proceed if volume owner not found
+
+    Signed-off-by: Ram
+
+commit 0a5a4afe686e0c1fcc8dd85432e19d292c344642
+Author: Christoph Böhmwalder
+Date: Fri May 22 11:58:28 2020 +0200
+
+    fix stork-scheduler RBAC role for Kubernetes 1.18
+
+    While trying to deploy stork on a k8s cluster with version 1.18, we
+    noticed that some permission errors came up when deploying the scheduler.
+    This fixes the yaml to work for k8s 1.18.
+
+    Signed-off-by: Christoph Böhmwalder
+
+commit 6343f8025385f5641edfe256d501bd53d3fe6198
+Author: Rohit-PX
+Date: Thu May 28 10:15:12 2020 -0700
+
+    Add apt-get update to deploy script
+
+    Signed-off-by: Rohit-PX
+
+commit c36af8e0b67efa4168d6efe2fb885b9038684d0e
+Author: Dinesh Israni
+Date: Wed May 27 12:18:42 2020 -0700
+
+    [AWS] Use correct volume id for CSI
+
+commit 61e36d97273c09cb42bffa2e7089bc219ef42181
+Author: Ram
+Date: Wed May 27 21:46:39 2020 +0530
+
+    Create clusterdomainstatus CR at controller init
+
+    Signed-off-by: Ram
+
+commit 46d7ea8e49b4da891f6f71240dbe8d7cdc8810fa
+Author: Dinesh Israni
+Date: Thu May 21 17:13:10 2020 -0700
+
+    [GCP] Parse zone and volume name for CSI PVCs
+
+commit 558b93ea72cd96edc84fa8f969ba21606fb06989
+Author: Dinesh Israni
+Date: Thu May 21 17:12:38 2020 -0700
+
+    Vendor update for gcp libraries
+
+commit 8809af2c5fde3ef727c2243c3ab18281d1c8f172
+Author: Dinesh Israni
+Date: Wed May 20 17:06:15 2020 -0700
+
+    Fix check for preparing jobs during restore
+
+commit 25d93f20fd76bc4143eb41d64629dc341fbe20ca
+Author: Christoph Böhmwalder
+Date: Fri May 8 10:39:57 2020 +0200
+
+    specs: remove initializers field from specs
+
+    The metadata.initializers field has been deprecated since Kubernetes
+    1.13, and has been completely removed in 1.16. So, when using a
+    Kubernetes version >=1.16, this errors out while creating the
+    deployment.
+
+    Fix this by just removing the key in stork-deployment.yaml and
+    stork-scheduler.yaml.
+
+    Signed-off-by: Christoph Böhmwalder
+
+commit d39c7e69afc409823034b6d7a6613c1cbe4ff037
+Author: Dinesh Israni
+Date: Tue May 19 13:23:58 2020 -0700
+
+    [GCP] Fix for using correct volume ID for CSI PVCs
+
+commit 11aa9b09300e1e1f3813318a6c74d76912779e24
+Author: Dinesh Israni
+Date: Tue May 19 13:16:56 2020 -0700
+
+    [AWS] Fix for using correct volume ID for CSI PVCs
+
+commit df398cd173852859f944d5af8edf2e51d0690c2c
+Author: Dinesh Israni
+Date: Tue May 19 13:03:14 2020 -0700
+
+    [Azure] Fix for using correct volume ID for CSI PVCs
+
+commit cb9f1aed0259bb1e9bd3abcfffbf7c53dcec1fae
+Author: Dmytro Tsapko
+Date: Thu May 7 02:10:36 2020 +0300
+
+    [portworx] openstorage was updated
+
+    Signed-off-by: Dmytro Tsapko
+
+commit 29904ff46b62fb1f10fa6cf9ef9e5f0ce86e4f02
+Author: Dmytro Tsapko
+Date: Wed Apr 22 21:23:01 2020 +0300
+
+    [portworx] TLS related scheduler to driver communication updates.
+    1. TLS for Legacy REST API decided to be disabled.
+    2. The possibility of loading the CA cert from an external file was added
+
+    Signed-off-by: Dmytro Tsapko
+
+commit c51d7a4d121f6be6a338abb5cde567fa7da09b11
+Author: Rohit-PX
+Date: Wed May 13 22:49:06 2020 -0700
+
+    Vendor changes for security including torpedo, schedops and openstorage
+
+    Signed-off-by: Rohit-PX
+
+commit 9d288298785275d5c92e184353273f8542b710ae
+Author: Rohit-PX
+Date: Wed May 13 22:43:44 2020 -0700
+
+    Changes for running stork with px security
+
+    Signed-off-by: Rohit-PX
+
+commit cdb859759d74b1956c9fe974bf8998987f91f171
+Author: Dinesh Israni
+Date: Wed May 13 14:15:00 2020 -0700
+
+    [AWS] Add CSI provisioner name check
+
+commit fbd5a9e681f95f79b98eaf293ce5c45414c3c3fe
+Author: Dinesh Israni
+Date: Wed May 13 14:12:58 2020 -0700
+
+    [GCP] Add CSI provisioner name check
+
+commit 9af530b8399f5f35d6908c086129bf8a0847e21f
+Author: Dinesh Israni
+Date: Wed May 13 14:10:13 2020 -0700
+
+    [Azure] Add CSI provisioner name check
+
+commit 13f7994c13763aaecf9d4e0e52df9c7655cae7f5
+Author: Dinesh Israni
+Date: Wed May 13 14:39:34 2020 -0700
+
+    Update vendor dependencies
+
+commit 8b0aca75eb40ffca9a6851688ba499ebf08d7ece
+Author: Dinesh Israni
+Date: Tue Apr 14 16:01:25 2020 -0700
+
+    Collect jobs from resource collector
+
+    Since jobs might not be idempotent, restoring/cloning/migrating them is
+    optional. They will always be backed up
+
+commit e5a35793e40569654cee3537c49d235b7359d1ac
+Author: Ram
+Date: Thu May 7 10:04:44 2020 -0700
+
+    change base docker image to redhat-ubi8
+
+    Signed-off-by: Ram
+
+commit 7442bd342b3041a0c2d9c088f5656c8700fdf5d1
+Author: Rohit-PX
+Date: Mon May 11 12:26:51 2020 -0700
+
+    Change regex to pick individual focussed test
+
+    Signed-off-by: Rohit-PX
+
+commit e6ec5554091c817553a6551950de14f55b76ecac
+Author: Rohit-PX
+Date: Mon Apr 27 15:58:32 2020 -0700
+
+    Add env variables for aws access to stork-test
+
+    Signed-off-by: Rohit-PX
+
+commit 3ae156cc74fc2be5d25a00fa2cabaa77683195d1 (origin/master-pb438)
+Author: Dinesh Israni
+Date: Sat Apr 25 19:48:21 2020 -0700
+
+    During backup skip PVCs with unsupported drivers
+
+commit e03885db7dbc97fa8a7464f5a5dba1b04926f79b
+Author: Dinesh Israni
+Date: Fri Apr 24 20:48:13 2020 -0700
+
+    Don't block driver registration if init fails
+
+    The init will be tried again when the APIs are called if init had failed
+
+commit e98e472b0fefbd3062ce4bdd089836a525f51c86
+Author: Ram
+Date: Mon Apr 13 10:46:54 2020 +0530
+
+    vendor update for openstorage
+
+    Signed-off-by: Ram
+
+commit a6df695bdd20d0b511f83add217e68e888f388b4
+Author: Ram
+Date: Mon Apr 13 08:45:13 2020 +0530
+
+    [portworx] cloudsnap restore api update
+
+    Signed-off-by: Ram
+
+commit 62e08ba1ab2635f88653a106bd5cc51f091e9edc
+Author: Dinesh Israni
+Date: Sat Apr 18 23:53:29 2020 -0700
+
+    Allow drivers to return resources that should be restored before volumes
+
+    This allows the Portworx driver to specify any secrets that it might require to
+    be created before starting the volume restore.
+    No-op in the other drivers for now.
+
+commit f154ad70c80df0f786306b3a05a84f5dcd35fb73
+Author: Dinesh Israni
+Date: Sat Apr 18 18:24:47 2020 -0700
+
+    Ignore error when updating initial state for pre exec rule in backup
+
+    The object could have been modified. This allows the reconcile to run
+    again with the updated object
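Commit 29904ff4 above mentions loading the CA cert from an external file for scheduler-to-driver TLS. A minimal sketch of that plumbing with the standard library (the file path and error text are ours):

    import (
        "crypto/tls"
        "crypto/x509"
        "fmt"
        "io/ioutil"
    )

    // tlsConfigWithCA returns a TLS config that trusts the CA certificate
    // read from an external PEM file.
    func tlsConfigWithCA(caFile string) (*tls.Config, error) {
        pem, err := ioutil.ReadFile(caFile)
        if err != nil {
            return nil, err
        }
        pool := x509.NewCertPool()
        if !pool.AppendCertsFromPEM(pem) {
            return nil, fmt.Errorf("no valid certificates found in %s", caFile)
        }
        return &tls.Config{RootCAs: pool}, nil
    }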
+
+commit 76b5f677548811a5a501d228788d55299e5c1ae0
+Author: Dinesh Israni
+Date: Sat Apr 18 16:13:40 2020 -0700
+
+    Skip PV restore if bound to a PVC that shouldn't be restored
+
+commit 72693b859342d59d90dbac783ce653b3597e555b
+Author: Dinesh Israni
+Date: Fri Apr 17 23:44:24 2020 -0700
+
+    Fix pre exec rule failure in application backup
+
+    Multiple updates were failing because of conflicts. Fetching latest object
+    before updating in pre exec path
+    Also if the pre exec rule fails the backup will be marked as Failed instead of
+    retrying
+
+commit e19b2d5b96ec6f0ae6cbe0c97c9bb38da94dc17f
+Author: Ram
+Date: Mon Apr 13 20:48:42 2020 +0530
+
+    update health check UT's for node status backoff retries
+
+    Signed-off-by: Ram
+
+commit 2033f1fb65fa9238b56c63763b6cb2b08a149505
+Author: Ram
+Date: Thu Apr 9 21:03:24 2020 +0530
+
+    add backoff to check node status before deleting pods
+    - make node status check an async routine
+
+    Signed-off-by: Ram
+
+commit c6bec7b08fc364d91b6cdc747415eecd4ee6b704
+Author: Ram
+Date: Thu Apr 9 21:02:52 2020 +0530
+
+    Add driver node inspect api
+
+    Signed-off-by: Ram
+
+commit fdb9fa4bec10540e95bcfb47fb60ab6b9dd44221
+Author: Dinesh Israni
+Date: Thu Apr 16 18:38:04 2020 -0700
+
+    During restore skip resources from namespaces that aren't provided in spec
+
+commit ded9302c7a5f92d2318c37ea4c9121f47a5f41d7
+Author: Dinesh Israni
+Date: Mon Apr 13 15:22:37 2020 -0700
+
+    Create bucket during application backup if possible
+
+commit 8c65b881689a931d10fda2c6730f773328020598
+Author: Dinesh Israni
+Date: Mon Apr 13 15:22:23 2020 -0700
+
+    Vendor update
+
+commit dbb605fd07d291039f41d34c20425bb8d7e89793
+Author: Dinesh Israni
+Date: Thu Apr 16 22:52:09 2020 -0700
+
+    [AWS] Retry snaps for internal errors
+
+commit b58853d44b97129b16172ef39a46654ae82dc3d8
+Author: Dinesh Israni
+Date: Thu Apr 16 15:55:18 2020 -0700
+
+    Return error if not able to fetch snapshot info in schedule controller
+
+commit c160c90f1df05a3f487467dc631163392a2a2346
+Author: Dinesh Israni
+Date: Thu Apr 16 15:54:47 2020 -0700
+
+    [Portworx] Add failure reason for cloudsnap to message
+
+commit c2123470a0571264651dbb5ce92228c8510fc81b
+Author: Dinesh Israni
+Date: Wed Apr 15 16:46:22 2020 -0700
+
+    For older backups missing the driver name, set the default
+
+commit 464474b0526c029a1576ac444b237a4aacea0a70
+Author: Dinesh Israni
+Date: Wed Apr 15 16:45:41 2020 -0700
+
+    Remove some cattle specific annotations from namespace for migration
+
+commit da9194746bf813590c011a5ccf9f69cbd5379cc2 (origin/master_rulecr_update)
+Author: Ram
+Date: Tue Apr 14 22:06:12 2020 +0530
+
+    Don't start webhook controller if driver is empty
+
+    Signed-off-by: Ram
+
+commit 69e5e26faf7612d7fbf2f59872cf382544759e29
+Author: Dinesh Israni
+Date: Mon Apr 13 17:55:09 2020 -0700
+
+    [AWS] Add backoff for snapshot API in case rate limit is hit
+
+commit d045c5911630f2e68a4a7dc579092804235edd26
+Author: Rohit-PX
+Date: Fri Apr 10 00:16:31 2020 -0700
+
+    Explicitly reset kubeconfig after every migration-backup loop
+
+    Signed-off-by: Rohit-PX
+
+commit df8bcecf8f717824e9907712cdb7696446d9a821
+Author: Dinesh Israni
+Date: Wed Apr 8 22:14:23 2020 -0700
+
+    [Portworx] Add backup id to spec when checking status
+
+commit d5cbcca0f8f1b0fb01fab1cf6934780d51193271
+Author: Dinesh Israni
+Date: Wed Apr 8 22:14:03 2020 -0700
+
+    Mark backup as failed if preparing or uploading resources fails
+
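Commit 69e5e26f above adds backoff for the AWS snapshot API. A sketch of the shape such a retry takes with apimachinery's wait helpers; matching on the RequestLimitExceeded string is a simplification of inspecting the typed AWS error:

    import (
        "strings"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    // withSnapshotBackoff retries fn with exponential delays while AWS is
    // throttling; any other error aborts immediately.
    func withSnapshotBackoff(fn func() error) error {
        backoff := wait.Backoff{Duration: time.Second, Factor: 2.0, Steps: 5}
        return wait.ExponentialBackoff(backoff, func() (bool, error) {
            err := fn()
            if err == nil {
                return true, nil
            }
            if strings.Contains(err.Error(), "RequestLimitExceeded") {
                return false, nil // rate limited: retry after the next delay
            }
            return false, err
        })
    }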
+commit 6b16f8c2f4404ce339d8d6b08d68fdc82ca7db23
+Author: Rohit-PX
+Date: Fri Apr 3 14:39:36 2020 -0700
+
+    Integration test for application backup of a migrated app
+
+    Signed-off-by: Rohit-PX
+
+commit f4c24a7b9ea2775df9acbec8145f6d3b157e2f3c
+Author: Dinesh Israni
+Date: Fri Apr 3 09:33:26 2020 -0700
+
+    Check if schedule policy exists in cache
+
+commit c8d0a7460c1f7919deb3c9da05b2b83bb54bda75
+Author: Serhii Aheienko
+Date: Tue Mar 3 20:58:35 2020 +0200
+
+    Use controller-runtime manager
+
+    Signed-off-by: Serhii Aheienko
+
+commit 17aee876f1ecf1ce9d2480c6ebed561997002f38
+Author: Serhii Aheienko
+Date: Tue Mar 3 20:58:12 2020 +0200
+
+    Update dependencies
+
+    Signed-off-by: Serhii Aheienko
+
+commit f93c2405bd1e2501402b1836cd6291bb63d34d6e
+Author: Dinesh Israni
+Date: Tue Mar 24 17:46:00 2020 -0700
+
+    Vendor update for sched-ops
+
+commit 95db0243d6c04c47d8d096bcd12a66b57761522e
+Author: Dinesh Israni
+Date: Tue Mar 24 17:45:34 2020 -0700
+
+    Merge service accounts when applying for restore
+
+commit 4b0367842c5c72ea26803e66ecf1b9b5a08c71d0
+Author: Dinesh Israni
+Date: Thu Mar 26 19:17:17 2020 -0700
+
+    Don't update LastUpdateTime in application backup if status isn't being updated
+
+commit ebb49b970f04ea72b92c33a26e89af1b9db67639
+Author: Dinesh Israni
+Date: Tue Mar 24 18:22:57 2020 -0700
+
+    [Portworx] Check for annotation to skip backuplocation name check
+
+commit 70db754b57b25cdfd556182faa62257712aa9080
+Author: Dinesh Israni
+Date: Tue Mar 24 18:22:38 2020 -0700
+
+    Vendor update
+
+commit 073c11880fe797c44573650feb123733f968145d
+Author: Ram
+Date: Thu Mar 26 11:31:15 2020 +0530
+
+    Avoid adding duplicate volume to restore status
+
+    Signed-off-by: Ram
+
+commit 6e65288ed7cec2475330dc2f64a24f2ef7e2f0ba
+Author: Rohit-PX
+Date: Mon Mar 23 00:36:54 2020 -0700
+
+    Integration test for migration and reverse migration between two clusters
+
+    Signed-off-by: Rohit-PX
+
+commit ca77db97beb9ea46d986e5c6d50694b6e1b241a9
+Author: Rohit-PX
+Date: Fri Mar 13 12:25:58 2020 -0700
+
+    Delete app backups before backuplocation in sync controller tests
+
+    Signed-off-by: Rohit-PX
+
+commit e9fc93492186051a9cac3c5a57763c2037d75d4f
+Author: Dinesh Israni
+Date: Mon Mar 23 16:49:25 2020 -0700
+
+    Add annotation to collect objects that have an owner reference
+
+    In case of objects created by operators, we might still want to collect some
+    objects that are only created by the operator once
+
+commit b680bdac79563df7708eea5a41c384465dd2b746
+Author: Dinesh Israni
+Date: Fri Mar 20 17:03:04 2020 -0700
+
+    Update sched-ops client for all packages
+
+commit 20ff8ae1edb28821ce3ad9e681a28aaa08a870fb
+Author: Ram
+Date: Tue Mar 17 19:57:17 2020 +0530
+
+    [portworx] handle in-place restore for vol repl gt 2 for px driver
+
+    - integration test check for in-place restore repl 3 vols
+
+    Signed-off-by: Ram
+
+commit 4e64a43b6dd60cf8b375877cd5953b8df7f6a6d3
+Author: Rohit-PX
+Date: Tue Mar 17 02:17:52 2020 -0700
+
+    Reduce wait time between checks for app backup
+
+    Signed-off-by: Rohit-PX
+
+commit a5c3ec6de40dd218c517203b694d90b8dd93287b
+Author: Rohit-PX
+Date: Mon Mar 16 23:50:25 2020 -0700
+
+    Add parameter for secret name for cloud provider API access
+
+    Signed-off-by: Rohit-PX
+
+commit 545b9ebb6f4c0aa52d494b21748fb0cb1281a6f3
+Author: Dinesh Israni
+Date: Thu Mar 12 14:40:00 2020 -0700
+
+    Add cache for schedule policy
+
+commit c53bd588fd85d9a2a7f4111ff6c8658378ff44df
+Author: Rohit-PX
+Date: Fri Mar 13 15:28:32 2020 -0700
+
+    Add parameter for path in backup location
+
+    Signed-off-by: Rohit-PX
+
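Commit e9fc9349 above lets users opt owner-referenced objects into collection. A sketch of the check; the annotation key here is made up for illustration, since the real key isn't shown in this log:

    import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    // collectWithOwnerAnnotation is a hypothetical key; see the note above.
    const collectWithOwnerAnnotation = "example.com/collect-with-owner"

    // shouldCollect skips objects an operator will recreate on its own,
    // unless the user explicitly opted them in via the annotation.
    func shouldCollect(obj metav1.Object) bool {
        if len(obj.GetOwnerReferences()) == 0 {
            return true
        }
        return obj.GetAnnotations()[collectWithOwnerAnnotation] == "true"
    }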
+commit 5186ab68dc7e77a122948c378d80d7747be24720
+Author: Rohit-PX
+Date: Wed Mar 11 16:50:13 2020 -0700
+
+    Do not install stork-scheduler for non-PX backups
+
+    Signed-off-by: Rohit-PX
+
+commit d0c6dce72628154410d88e590b705415e613bd24
+Author: Rohit-PX
+Date: Tue Mar 10 11:44:07 2020 -0700
+
+    Import k8s client auth, add gcloud binaries, aws authenticator in tests
+
+    Signed-off-by: Rohit-PX
+
+commit b46a1fcd1c39b031fb7ecdaa5e53bc3fc60c3550
+Author: Dinesh Israni
+Date: Thu Mar 5 18:40:58 2020 -0800
+
+    [Azure] Check if snapshot or volume is created before starting new operation
+
+commit 4dbda80748afd799f8109517ff70c9a491c7aa7e
+Author: Dinesh Israni
+Date: Thu Mar 5 16:08:18 2020 -0800
+
+    [GCP] Check if snapshot or volume is created before starting new operation
+
+commit 0286d275baaa504f67a76d04fa97f384b0cb102d
+Author: Dinesh Israni
+Date: Thu Mar 5 14:36:21 2020 -0800
+
+    Use common helper to generate tags for snapshots and volumes
+
+commit 0094d7bc422e4482585746d59d9c784a538fdd0a
+Author: Dinesh Israni
+Date: Thu Mar 5 14:20:09 2020 -0800
+
+    [AWS] Check if snapshot or volume is created before starting new operation
+
+    Helps deal with crashes and when the operations are triggered but we aren't able
+    to store the status in the object
+
+commit 4e1265dc021f95885038607f2ccb31ae829d3e46
+Author: Rohit-PX
+Date: Fri Mar 6 17:10:47 2020 -0800
+
+    Set all configmap after default config map is correctly set
+
+    Signed-off-by: Rohit-PX
+
+commit dd6528ea402a823b9749741f9ce1550d05ef62de
+Author: Rohit-PX
+Date: Thu Mar 5 17:48:12 2020 -0800
+
+    Import Azure, AWS stork vol driver in test
+
+    Signed-off-by: Rohit-PX
+
+commit 4b00824317d818eae8076bf6b89a8c30e9f0d295
+Author: Rohit-PX
+Date: Thu Mar 5 11:17:32 2020 -0800
+
+    Remove vol driver from stork spec when not using PX driver
+
+    Signed-off-by: Rohit-PX
+
+commit 2a6c59b2d9f6ae0dfa935dcdefc7192b5ea83a41
+Author: Dinesh Israni
+Date: Tue Mar 3 18:31:49 2020 -0800
+
+    Add last update timestamp to backup and restore specs
+
+commit d027fd80a4a5e3245aa2a87446f710d354b8a009
+Author: Rohit-PX
+Date: Wed Mar 4 18:49:59 2020 -0800
+
+    Replace volume driver in stork specs for non-PX backends
+
+    Signed-off-by: Rohit-PX
+
+commit 4bc7695a549befbf83a11ebf04d19cb28311b6bc
+Author: Rohit-PX
+Date: Wed Mar 4 17:18:50 2020 -0800
+
+    Add apt-get install jq for ubuntu base images
+
+    Signed-off-by: Rohit-PX
+
+commit 342869040bbce579a9ca1e2b103134ff30b8c40c
+Author: Dinesh Israni
+Date: Tue Feb 25 16:45:21 2020 -0800
+
+    Add a reason field for the overall backup and restore status
+
+commit 29c97961b3aa7e03f85548f05127ef6e5cae0fec
+Author: Rohit-PX
+Date: Tue Mar 3 18:06:08 2020 -0800
+
+    Integration test to start app backup when another is in progress
+
+    Signed-off-by: Rohit-PX
+
+commit eb919d621dba880026faca698c80f92d6399609c
+Author: Rohit-PX
+Date: Mon Mar 2 21:35:21 2020 -0800
+
+    integration test to delete backuplocation during app backup
+
+    Signed-off-by: Rohit-PX
+
+commit 2ae626e5c64de5c67143a31697298f638478360e
+Author: Dinesh Israni
+Date: Mon Mar 2 15:31:10 2020 -0800
+
+    Add expected count for app backup schedule tests
+
+commit 68f3e25ef2827db3595dc7034da2e673c2baf1ec
+Author: Dinesh Israni
+Date: Mon Mar 2 15:29:01 2020 -0800
+
+    Vendor update for sched-ops to fix validation in test
+
+commit 5471a7e067ee0d5488f6e567d818d13fe23cd692
+Author: Dinesh Israni
+Date: Thu Feb 27 12:43:43 2020 -0800
+
+    [Portworx] Add progress info for backup and restore to reason
+
+commit 1a0894e54ab8bfe39d40f36647cd1386efaa2517
+Author: Dinesh Israni
+Date: Wed Feb 26 19:17:53 2020 -0800
+
+    Get the correct pluralized name of resources for the dynamic client
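The three "check if snapshot or volume is created before starting new operation" commits above make the cloud operations idempotent across crashes. A generic sketch of the lookup-before-create step; both callbacks stand in for the cloud SDK calls:

    // ensureSnapshot first looks for a snapshot carrying the tag generated
    // for this backup, so a crash after a create call that never reached the
    // CR status doesn't produce a duplicate.
    func ensureSnapshot(findByTag func(tag string) (id string, found bool, err error),
        create func(tag string) (string, error), tag string) (string, error) {
        id, found, err := findByTag(tag)
        if err != nil {
            return "", err
        }
        if found {
            return id, nil // reuse the snapshot from the interrupted attempt
        }
        return create(tag)
    }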
+
+commit b517566728495db466e05af096b01e04bb14696e
+Author: Dinesh Israni
+Date: Fri Feb 28 14:13:37 2020 -0800
+
+    Use correct type for length of backups in test
+
+commit 6f6efe4bf45d8ecfa10286d37c4fbb05a7671a13
+Author: Rohit-PX
+Date: Thu Feb 27 18:45:23 2020 -0800
+
+    Vendor update for torpedo azure, aws vol drivers
+
+    Signed-off-by: Rohit-PX
+
+commit 2b2eb1f09dee98fb0e0a3c35bb72ff665b8eb302
+Author: Rohit-PX
+Date: Thu Feb 27 18:44:22 2020 -0800
+
+    Use aws, azure vol drivers from torpedo
+
+    Signed-off-by: Rohit-PX
+
+commit af794e1527fca0d4370f08874dd05bf18c9780a6
+Author: Dinesh Israni
+Date: Thu Feb 27 18:11:55 2020 -0800
+
+    Use correct API to delete backups in test
+
+commit abfb56253904f636b579f4c1af32407b8e66a240
+Author: Dinesh Israni
+Date: Thu Feb 27 12:49:55 2020 -0800
+
+    Check for correct number of application backups in schedule test
+
+commit 139343270baa3cca22f1c5e8743a20a750c61ca5
+Author: Dinesh Israni
+Date: Wed Feb 26 17:40:49 2020 -0800
+
+    Update applicationbackupschedule test for new retain policy
+
+    Backups created by schedule aren't deleted when the schedule is deleted
+
+commit 58785daf81b6fe0d2bf0f018b06e4fd217af3e4f
+Author: Dinesh Israni
+Date: Wed Feb 19 15:55:08 2020 -0800
+
+    [Portworx] Don't migrate volumes which have the skipResource annotation
+
+commit f3228bec42288507a084d03f4a8284a755aac9c1
+Author: Dinesh Israni
+Date: Wed Feb 19 16:25:42 2020 -0800
+
+    Cache the go cache directory in travis builds
+
+commit 699611ae6e52a136641f1e20650ba79f6a3fdc02
+Author: Dinesh Israni
+Date: Wed Feb 19 16:18:26 2020 -0800
+
+    Update make in travis to start 2 jobs at once
+
+commit aeb8b783b9a1a475fa555eb4fc737c08f580f0ca
+Author: Dinesh Israni
+Date: Wed Feb 19 15:39:44 2020 -0800
+
+    Set defaults for the spec in migration schedule
+
+commit aa166f45857a8d5576eb608fac6c2432a01c3c3a
+Author: Dinesh Israni
+Date: Wed Feb 19 15:39:20 2020 -0800
+
+    Set the default reclaim policy to retain for application backup schedule
+
+commit 0fd730ccf20dda71264e30cb9a116861aa2aa11e
+Author: Dinesh Israni
+Date: Wed Feb 19 15:59:00 2020 -0800
+
+    Collect CronJob objects for migration, backup and clones
+
+commit 705e53b68b3379538b326bfc3a97d5da749ce897
+Author: Rohit-PX
+Date: Mon Feb 24 23:03:31 2020 -0800
+
+    Vendor update for GCE torpedo driver
+
+    Signed-off-by: Rohit-PX
+
+commit c8b52067ce1270643c9565d36b58390428afcbd3
+Author: Rohit-PX
+Date: Mon Feb 3 17:43:44 2020 -0800
+
+    Ability to deploy apps on cloud platforms
+
+    Signed-off-by: Rohit-PX
+
+commit 9d9feb71284a79e1d8fa0bbbda3c3ac078c8ba79
+Author: Dinesh Israni
+Date: Thu Feb 20 16:04:40 2020 -0800
+
+    Vendor update for sched-ops
+
+    Fixes issue in integration test while checking for status
+
+commit df061d0810ec583d9a4ca41d679eaa869fd893c4
+Author: Rohit-PX
+Date: Tue Feb 18 19:22:09 2020 -0800
+
+    Scale down cassandra app after cloning
+
+    Signed-off-by: Rohit-PX
+
+commit 4e8943ff3d46225f6636e3f3672655c516a6ce81
+Author: Dinesh Israni
+Date: Thu Feb 13 20:41:48 2020 -0800
+
+    Remove initializer option from test script
+
+commit c975353e957a123d54acfab4bef3bbeda526e7d2
+Author: Dinesh Israni
+Date: Thu Feb 13 14:44:06 2020 -0800
+
+    Fix events in webhookadmission controller
+
+    Events need to be raised against an object.
Raise it against the
+    deployment/statefulset/pod if we have it, otherwise raise it against the webhook
+    config
+
+commit 046dc744d6e7ec4d0a0abee6369620dc1b064a2d
+Author: Dinesh Israni
+Date: Fri Feb 7 19:07:39 2020 -0800
+
+    Update APIs for k8s 1.16
+
+    - Removed initializer since it has been deprecated since k8s 1.14
+    - Updated sched-ops APIs
+    - Updated printing from storkctl since APIs had changed
+
+commit 67b5cf0ec29dca0f0be53f21b45277fd9502b7aa
+Author: Dinesh Israni
+Date: Thu Feb 6 20:02:33 2020 -0800
+
+    Update vendor for k8s and dependencies to 1.16.6
+
+commit 350b6cb6e50c6f8bed133f921308d2ad3239e2cf
+Author: Rohit-PX
+Date: Mon Feb 10 22:33:37 2020 -0800
+
+    Fix regex to replace entire stork,stork_test image names
+
+    Signed-off-by: Rohit-PX
+
+commit 5823ef060e9d14e75de976bacf4f13e418c274cc
+Author: Dinesh Israni
+Date: Thu Feb 13 16:53:55 2020 -0800
+
+    Fix duplicate tags being checked for aws driver
+
+    Use constants to avoid typos
+
+commit 38e7d20a5cf56e7c71eca01d8d66da0d42220a18
+Author: Dinesh Israni
+Date: Wed Feb 12 14:58:59 2020 -0800
+
+    [Azure] Store the resource group for volumes during snapshot
+
+    Use it during restore so that it can be restored across resource groups
+
+commit 136de3ab7bac1057638fdb3a9cd71a23095d9e5f
+Author: Dinesh Israni
+Date: Tue Feb 11 14:00:41 2020 -0800
+
+    Change driver init error message to debug
+
+commit 323faeab5ae7319eb79ee2a83d3f192880e47514
+Author: Dinesh Israni
+Date: Tue Feb 11 14:59:28 2020 -0800
+
+    Remove unused status types
+
+commit c0244eb623c67d3f6a858d44119de2630a7f3c0b
+Author: Arthur Lutz
+Date: Tue Feb 11 12:50:38 2020 +0100
+
+    [README] typo fix
+
+commit 2a14501ef2b758af297d3366222425d950ec31ae
+Author: Dinesh Israni
+Date: Thu Feb 6 00:25:45 2020 -0800
+
+    Init the schedule package even if driver isn't specified
+
+commit 977f8cbb26b5e5a4a8576023dfc5f3fedba637f6
+Author: Dinesh Israni
+Date: Thu Dec 12 16:31:00 2019 -0800
+
+    Implementation for AWS driver
+
+    Added support for ApplicationBackup and ApplicationRestore APIs
+
+commit 63b71c459fddc854ddce4e1d1c713e9c831f39eb
+Author: Dinesh Israni
+Date: Thu Dec 12 16:29:22 2019 -0800
+
+    Vendor update for aws
+
+commit 241c1b39f4af7582a67adbde916653c00c3b19a6
+Author: Dinesh Israni
+Date: Tue Jan 21 16:47:32 2020 -0800
+
+    Don't need to prepare PV when doing backup
+
+commit 8630944976dc472d05beaba7778a1df4d62c553a
+Author: Dinesh Israni
+Date: Tue Jan 21 16:47:12 2020 -0800
+
+    Implementation for Azure driver
+
+    Added support for ApplicationBackup and ApplicationRestore APIs
+
+commit 7c3a29584a43b60e091564936543817dbf2d2aa1
+Author: Dinesh Israni
+Date: Fri Dec 6 17:19:11 2019 -0800
+
+    Vendor update for azure dependencies
+
+commit 29a157cdd125c9b9f7b0b3d2457f431beb34c096
+Author: Dinesh Israni
+Date: Wed Jan 29 17:49:14 2020 -0800
+
+    Add support for gcp multi zone disks
+
+    Also use zone from disk when creating snapshot and disk instead of the zone
+    where stork is running
+
+commit d55b799989020c56a5861887beb7ab8a1b6346ca
+Author: Rohit-PX
+Date: Wed Feb 5 08:49:35 2020 -0800
+
+    Increase wait time for synced backups to appear on the destination cluster
+
+    Signed-off-by: Rohit-PX
+
+commit 5b5621941971bc65d64c8a802174aae723fc22de
+Author: Serhii Aheienko
+Date: Thu Jan 30 13:35:18 2020 +0200
+
+    Add Data export api definition
+
+    Signed-off-by: Serhii Aheienko
+
+commit dae7f407ffe028c0f156776b37c98cd453593ff3
+Author: Dinesh Israni
+Date: Fri Jan 24 13:23:07 2020 -0800
+
+    Use correct namespace when checking status of adminClusterPair
+
+commit d1f7095fb037600bb718e5ecaa0a6df5ab1d63f1
+Author: Ram
+Date: Tue Jan 21 14:40:25 2020 +0530
+
+    vendor update for sched-ops
+
+    Signed-off-by: Ram
+
+commit 258723fd0feed24664be867139f19e80d0a1d280
+Author: Ram
+Date: Mon Jan 20 22:14:13 2020 +0530
+
+    get stork namespace from env for webhook
+    - generate CN using namespace in env
+    - log server failure errors
+
+    Signed-off-by: Ram
+
+commit 2db938fa528ac8fa7cc98030ebd8157180eacd44
+Author: Ram
+Date: Thu Jan 9 19:50:29 2020 +0530
+
+    generate self-signed certificates for mutate webhook
+    admission controller
+    - sched-ops vendor updates
+
+    Signed-off-by: Ram
+
+commit 09f2784315ea29a0ff2e8484b0d255526e0451dc
+Author: Ram
+Date: Fri Dec 27 12:15:17 2019 +0530
+
+    add mutating webhook controller to stork
+    - start webhook server on 443
+    - add listener for mutate admission
+    - verify if deployment/ss using px volume and update
+    scheduler to stork
+    - update stork specs with webhook admission configuration
+    - fix errcheck warnings
+    handle pod scheduler update for webhook controller
+    - nil check for non persistentclaim source
+    - allow default app deployment
+    - update scheduler path to include pod spec path
+
+    Signed-off-by: Ram
+
+commit 87997a219d1746440a617477265f712156a98687
+Author: Dinesh Israni
+Date: Fri Jan 24 13:25:02 2020 -0800
+
+    Revert "Use correct namespace when checking status of adminClusterPair"
+
+    This reverts commit c963f03b29357fd3326afb474b8f8d4ddf5a3533.
+
+commit c963f03b29357fd3326afb474b8f8d4ddf5a3533
+Author: Dinesh Israni
+Date: Fri Jan 24 13:23:07 2020 -0800
+
+    Use correct namespace when checking status of adminClusterPair
+
+commit fba23c30430f40f7a99bdec81d94d67f90aeedf4
+Author: Dinesh Israni
+Date: Fri Jan 3 02:21:21 2020 -0800
+
+    Remove stale generated files
+
+commit 2a3c6a17d572970d683adecd819a953c5e7f093c
+Author: Dinesh Israni
+Date: Fri Dec 13 13:52:16 2019 -0800
+
+    Update start up parameters
+
+    Start the application manager controllers even if no driver is specified
+
+commit 8748a0f36bd807dcce63089e211dbd6eaf2e0360
+Author: Dinesh Israni
+Date: Fri Dec 6 17:19:11 2019 -0800
+
+    Vendor update
+
+commit 25d286f112b8c57b38737be311da23b314329376
+Author: Dinesh Israni
+Date: Sat Oct 19 01:30:24 2019 -0700
+
+    GCE driver implementation
+
+    Can be used to take snapshots of GCE PDs and restore them to PVCs
+
+commit 82217b2fa71de790b20f831b162fbbe010ec1f56
+Author: Dinesh Israni
+Date: Thu Oct 17 22:38:43 2019 -0700
+
+    Changes for app backup/restore to work with multiple drivers
+
+    - When taking a backup we will now look through PVCs in the namespace for all
+    supported drivers and try to create a backup
+    - The driver name will be stored in the backup object so that we know which
+    driver to use when doing the restore
+    - This also allows us to create a backup with multiple volume drivers in the
+    same backup
+    - Added an options field in the application backup CRD which can be used to
+    store info like projectID, zone, etc for cloud drives
+
+commit 44b7af4da13149fe8da7dbe31d31e1c22b8ffef9
+Author: Rohit-PX
+Date: Sun Jan 19 23:00:46 2020 -0800
+
+    Increase wait time for backups to be synced
+
+    Signed-off-by: Rohit-PX
+
+commit ef10d727b91499774ab383e12f02b326f26a03ed
+Author: Dinesh Israni
+Date: Fri Jan 17 12:06:26 2020 -0800
+
+    Disabling unittest because of bug in fake client
+
+commit 23da4bc8555004684b6dd1ff76eca184903ca94f
+Author: Dinesh Israni
+Date: Thu Jan 16 18:50:55 2020 -0800
+
+    Add activate/deactivate migration support for ibp objects
+
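Commit 2db938fa above generates self-signed certificates for the mutating webhook server. A condensed sketch with the standard crypto packages; per the surrounding commits, the CN would be the webhook service DNS name built from the stork namespace:

    import (
        "crypto/rand"
        "crypto/rsa"
        "crypto/x509"
        "crypto/x509/pkix"
        "math/big"
        "time"
    )

    // selfSignedCert returns a DER-encoded certificate and its key; the DER
    // bytes can double as the caBundle in the webhook configuration.
    func selfSignedCert(cn string) ([]byte, *rsa.PrivateKey, error) {
        key, err := rsa.GenerateKey(rand.Reader, 2048)
        if err != nil {
            return nil, nil, err
        }
        tmpl := &x509.Certificate{
            SerialNumber:          big.NewInt(1),
            Subject:               pkix.Name{CommonName: cn},
            DNSNames:              []string{cn},
            NotBefore:             time.Now(),
            NotAfter:              time.Now().AddDate(1, 0, 0),
            KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
            ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
            IsCA:                  true,
            BasicConstraintsValid: true,
        }
        der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
        return der, key, err
    }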
+commit fa8da1871df18f980582a99b83fe0c337185623f
+Author: Dinesh Israni
+Date: Thu Jan 16 18:50:33 2020 -0800
+
+    Vendor update
+
+commit 7dc3df13f98522aa1a0604e0223877f9ab6c8c4c
+Author: Dinesh Israni
+Date: Wed Oct 9 17:15:01 2019 -0700
+
+    Add support for some CRDs to resourcecollector
+
+commit fd149960d627014354ad233a0675cc9c3e0e0ec1
+Author: Grant Griffiths
+Date: Fri Dec 13 14:15:09 2019 -0800
+
+    Add stork secrets implementation based on lsecrets.Instance()
+
+    Signed-off-by: Grant Griffiths
+
+commit 4ac2b39d8e86570b99842ddb1eee7c3205df8dfe
+Author: Rohit-PX
+Date: Wed Nov 13 00:42:23 2019 -0800
+
+    Scaled integration test for app backup
+
+    Signed-off-by: Rohit-PX
+
+commit 54645c25047d6a27e15777c2f0f3ed014ac35739
+Author: Ram
+Date: Wed Dec 4 13:26:43 2019 +0530
+
+    fix staticcheck errors
+
+    Signed-off-by: Ram
+
+commit f92709c17b81de61747e17d0b7ef6b28c605c68e
+Author: Ram
+Date: Wed Dec 4 12:34:13 2019 +0530
+
+    [portworx] bump px version for in place restore feature
+
+    Signed-off-by: Ram
+
+commit fb8f8cb4452d8235a511516e312bb1c692cb9835
+Author: Ram
+Date: Fri Nov 22 21:00:59 2019 +0530
+
+    add integration test for migration cleanup feature
+
+    Signed-off-by: Ram
+
+commit 8026c7c36869492b5940cc8934c0ef1d7bf3f6e2
+Author: Ram
+Date: Tue Nov 26 19:18:17 2019 +0530
+
+    Move destination resource collection to migration controller
+    - use NewInstance() instead of changing singleton k8s instance in
+    resource collector
+    - collect old resources to purge inside migration controller instead
+    of resourcecollector pkg
+    - change migration cleanup status to migration purge
+    - collect resource from destination cluster using new resource
+    collector
+
+    Signed-off-by: Ram
+
+commit ef30674a7ccafe91fd4f7a7b2392ef4f96dac9ea
+Author: Ram
+Date: Mon Nov 18 23:55:35 2019 +0530
+
+    Cleanup migrated resources from destination cluster
+    - modified GetResources() to accept cluster config to fetch resource from
+    - only fetch resources which have the migration annotation
+    - find and delete cleaned up resources from dest cluster
+
+    Signed-off-by: Ram
+
+commit df796e6cf246fff637588339d7b20aeafc5f865e
+Author: Ram
+Date: Tue Nov 12 23:56:30 2019 +0530
+
+    Add CleanupResources flag to migration specs
+    - add annotation to migrating resources by stork
+    - have stub to cleanup resources for dest cluster
+
+    Signed-off-by: Ram
+
+commit a1da847c31d14ea0400c33c3392c1acd64b938de
+Author: Ram
+Date: Tue Nov 26 10:26:56 2019 +0530
+
+    Wait for volume restore to succeed
+
+    Signed-off-by: Ram
+
+commit 5ad0ac664eaadb93953c6b28e1740a4c4edb9928
+Author: Dinesh Israni
+Date: Fri Nov 15 18:34:42 2019 -0800
+
+    Rename stage in VolumeSnapshotRestore
+
+commit e1dce3b483660f487c2d9038e3846a91e9482528
+Author: Dinesh Israni
+Date: Thu Nov 14 16:56:37 2019 -0800
+
+    Increase timeout for backup test
+
+commit c762b61282ce760618d5c2b6a30ba22d1d5e701d
+Author: Dinesh Israni
+Date: Tue Nov 12 23:00:29 2019 -0800
+
+    Remove call to verifySnapshot for cloudsnap test
+
+commit 27d8b1ec0ac530b94524eeb9463734164f21b1ed
+Author: Rohit-PX
+Date: Mon Nov 11 14:46:58 2019 -0800
+
+    Verify snapshot instead of groupsnapshot
+
+    Signed-off-by: Rohit-PX
+
+commit e10b370262ef1ff1231525956a17cc020aea4013
+Author: Rohit-PX
+Date: Mon Nov 11 13:17:54 2019 -0800
+
+    Wait for snapshot to complete in in-place restore tests
+
+    Signed-off-by: Rohit-PX
+
+commit 8e91de91cbfe9118bfc451b770c5c01703ce827c
+Author: Ram
+Date: Fri Nov 8 08:28:23 2019 -0800
+
+    [Portworx] fixes groupcsrestore feature
+
+    - correct error msg for restore failure
+    - pass 
only one pool while doing haupdate + - add debugging for correct poolids + + Signed-off-by: Ram + +commit 77285813a5a3442b2a09f51bdb0244b6be689b1d +Author: Rohit-PX +Date: Thu Nov 7 11:18:45 2019 -0800 + + Ability to skip restore tests even when running individually. + + Signed-off-by: Rohit-PX + +commit 589f83239dfe6690868ee909ac0d56f11a897d55 +Author: Dinesh Israni +Date: Wed Oct 30 17:28:41 2019 -0700 + + Updates for storkctl + + - Add missing backupName when creating ApplicationRestore + - Add replacePolicy param when creating ApplicationClone and ApplicationRestore + - Fix alias clash for VolumeSnapshotRestore and ApplicationRestore + +commit 64e6b90ad5b6bacec47400c2ee47e1282b8d7bfa +Author: Rohit-PX +Date: Wed Nov 6 16:20:25 2019 -0800 + + Add vendor updates for auth groupvolumesnapshot. + + Signed-off-by: Rohit-PX + +commit fde1fdca2bd6d87b55187d5101a37bb5feedb59b +Author: Rohit-PX +Date: Wed Nov 6 00:15:05 2019 -0800 + + Add test for group cloudsnap restore, flag to skip tests. + + Signed-off-by: Rohit-PX + +commit 02f776fcdd7db501a0309b8a182b222bc3df8821 +Author: Ram +Date: Tue Nov 5 09:02:49 2019 -0800 + + pass poolids to Nodeid for ha update api + + Signed-off-by: Ram + +commit efcaadf1e8ecc80a73a9b1385d4559393d7faaf3 +Author: Rohit-PX +Date: Mon Nov 4 16:48:21 2019 -0800 + + Parameterize stork volume driver for integration tests. + + Signed-off-by: Rohit-PX + +commit 82e4959488320b4f145f8af50dde37ce4c4d9add +Author: Ram +Date: Mon Nov 4 02:22:25 2019 -0800 + + enable cs inplace restore integration tests + + Signed-off-by: Ram + +commit dc4974e1cb9ac0abeeb7e90f8bae038fbd02bd1e +Author: Ram +Date: Thu Oct 31 12:15:40 2019 +0530 + + update ha increase api nodeid to poolids + - update gettoken api with vendor changes + + Signed-off-by: Ram + +commit 12d6d093f6895e9088f230d47ba6ced9fd2aba58 +Author: Ram +Date: Thu Oct 31 12:13:29 2019 +0530 + + vendor update openstorage/release-7.0 + + Signed-off-by: Ram + +commit b2844ea9c522ed824f70c82e1276e2b672db7e9b +Author: Rohit-PX +Date: Fri Nov 1 16:43:32 2019 -0700 + + Remove sleep which is not required. + + Signed-off-by: Rohit-PX + +commit 5969ad9bac19eed82915e0ff3daa380a5ea5495f +Author: Rohit-PX +Date: Fri Nov 1 12:32:42 2019 -0700 + + If stork_test is running, tail pod logs. + + Signed-off-by: Rohit-PX + +commit 3b305535b27f177d209dce3e4f2554ea1388c550 +Author: Rohit-PX +Date: Thu Oct 24 13:46:28 2019 -0700 + + Add API to wait for backup completion in integration tests. + + Signed-off-by: Rohit-PX + +commit b4315735cda93292155d9f99b4e56743ee9a94b6 +Author: Dinesh Israni +Date: Wed Oct 30 15:39:21 2019 -0700 + + Update google sdk and some python packages in container + +commit 0edbe2ce1c72172aab585ba6baf2008eba1b750d +Author: Rohit-PX +Date: Tue Oct 29 12:38:12 2019 -0700 + + Vendor update for torpedo. + + Signed-off-by: Rohit-PX + +commit c1127c65a863a8530e0a288f536a113fef01f983 +Author: Rohit-PX +Date: Mon May 6 11:45:40 2019 -0700 + + Add ability to run in auth-enabled environment. + + Signed-off-by: Rohit-PX + +commit 17aa6d2fcc4a5dd9c7dfa446b3fc2ddcfd3f7fbd +Author: Rohit-PX +Date: Mon Oct 28 13:49:14 2019 -0700 + + Add selector labels to stork daemonset and cassandra example. + + Signed-off-by: Rohit-PX + +commit ecdc6c2125b0df5ab04c60bdf3e6736bd59ccef1 +Author: Rohit-PX +Date: Mon Oct 28 12:34:03 2019 -0700 + + Fix selector label. + + Signed-off-by: Rohit-PX + +commit 183987621be9e1f2c5a8556ece6cba89034fb6f5 +Author: Rohit-PX +Date: Mon Oct 28 11:52:49 2019 -0700 + + Add selector to spec. 
+
+    Signed-off-by: Rohit-PX
+
+commit deff958975694c7f3769a968cde0a76f6a5c42f7
+Author: Rohit-PX
+Date: Mon Oct 28 11:20:16 2019 -0700
+
+    Update API version to v1.
+
+    Signed-off-by: Rohit-PX
+
+commit 35060e5a26d0dddbd53bef5b8c30f80e803e0a23
+Author: Dinesh Israni
+Date: Fri Oct 25 16:00:39 2019 -0700
+
+    Update spec version for deployments
+
+    Also add csinodes permission for scheduler clusterrole,
+    required for k8s 1.16 onwards
+
+commit c938095c7e5aa3fae294af24b167e67f552c460c
+Author: Rohit-PX
+Date: Mon Oct 14 01:36:27 2019 -0700
+
+    Migration scale test.
+
+    Signed-off-by: Rohit-PX
+
+commit c232cfedb42d2bccd62dbb9d58496ccaa039dce0
+Author: Rohit-PX
+Date: Mon Oct 21 18:21:22 2019 -0700
+
+    Update torpedo vendor.
+
+    Signed-off-by: Rohit-PX
+
+commit 0d54b386a278b6e20402d9cdf125bfb5f3216207
+Author: Dinesh Israni
+Date: Sat Oct 19 01:31:08 2019 -0700
+
+    Update test flag parsing which was broken in go 1.13
+
+commit e340d820e733cdaba3a2e9b3ee690bb651345b8f
+Author: Dinesh Israni
+Date: Thu Oct 17 22:46:19 2019 -0700
+
+    Update go version in travis to 1.13.1
+
+commit 66ea6ffe433cdd17e375439e35983f322fc0afc4
+Author: Dinesh Israni
+Date: Thu Oct 17 22:45:56 2019 -0700
+
+    Vendor update
+
+commit c046355acd59894ef543c4be9d7881891d6775e3
+Author: Dinesh Israni
+Date: Wed Oct 9 18:04:50 2019 -0700
+
+    Groupsnapshot: If objects exist reuse them if they are the same
+
+    Else delete and re-create them
+
+commit e3114609abac69b49174f2ae0d6fed87359bf783
+Author: Harsh Desai
+Date: Sun Oct 13 10:00:07 2019 -0700
+
+    update vendor to use sched-ops kubernetes-1.11
+
+    Signed-off-by: Harsh Desai
+
+commit cc2e9f96a038bba5e533f9213172ccab12b0b52e
+Author: Dinesh Israni
+Date: Wed Oct 9 18:05:59 2019 -0700
+
+    Bump version on master to 2.4.0
+
+commit 6a3497c42b2a12fd5b6337a2b81d760b91cbb5f4
+Author: Rohit-PX
+Date: Wed Oct 9 12:14:54 2019 -0700
+
+    Fix error with creation of stork-mock-time.
+ + Signed-off-by: Rohit-PX + +commit 283dfa2d646afcdbbdb59e69614169b3697f575f +Author: Dinesh Israni +Date: Wed Oct 9 01:25:35 2019 -0700 + + Use created config map for watch on mock time + + Previously if the mock time config map was not created we would end up using the + empty config map to start the watch + +commit fb628de7dcfb6ccfe0c92ed4ba4bcd7b9341c3dd +Author: Dinesh Israni +Date: Fri Oct 4 19:28:22 2019 -0700 + + Add checks for some resources when collecting them + + - Secrets: Ignore autocreated secrets with well known prefix names + - ServiceAccounts: Ignore autocreated service accounts unless they have + non-default image pull secrets configured + - Ingress: Only collect namespaced scoped objects + - Role/RoleBinding: Ignore autocreated objects starting with "system:" + +commit f8ae20dc8570259da875992298e8a450bb6fee50 +Author: Dinesh Israni +Date: Fri Oct 4 19:27:16 2019 -0700 + + Fix retry for Unauthorized errors when migrating resources + +commit 05b615774d3b9b9f351056f0cc59a6d66a31a300 +Author: Dinesh Israni +Date: Thu Sep 26 17:08:26 2019 -0700 + + Some optimizations for migration + + - Don't cancel migrations that are in Final stage + - Don't update migration schedule after prune if the object wasn't updated + +commit 10c3e3e72bb5e0511beed1d3265117265f188791 +Author: Dinesh Israni +Date: Sat Sep 28 14:18:23 2019 -0700 + + Ignore PVCs that are being deleted during backup + +commit 0362b76554980d2f3c5e25516c0d30b0c6141bc4 +Author: Dinesh Israni +Date: Sat Sep 28 14:07:13 2019 -0700 + + Fix termination of backoff for cancelling backups + +commit 85080c7e6bca8b08ac05c9092c6917a41620aed5 +Author: Dinesh Israni +Date: Fri Sep 27 17:09:49 2019 -0700 + + Add the old parameter for cluster-admin-namespace + + Added the message that it is deprecated + +commit fa4dd6972783a9e99394ef4f431d5693e98b5943 +Author: Aditya Dani +Date: Thu Sep 26 18:40:26 2019 -0700 + + Cluster domain tests: Wait till scale factor reduces to 0 before failover. + +commit d260c19a2f888a13cecb7ab75676e165c47df6f2 +Author: Rohit-PX +Date: Thu Sep 26 14:21:14 2019 -0700 + + Remove incorrect check for reclaim policy. + + Signed-off-by: Rohit-PX + +commit 1ceddf079b835d0d7706d8b728e2a39dc987b469 +Author: Rohit-PX +Date: Tue Sep 24 00:59:35 2019 -0700 + + Backup to and restore from S3, Azure, Google. + + Signed-off-by: Rohit-PX + +commit 599aea4030e917386351da19d7c21ebeb0e65f70 +Author: Dinesh Israni +Date: Tue Aug 13 17:48:04 2019 -0700 + + Cancel and fail backup if starting volume backup fails + +commit cfb91a641e74963c829927ca003db28412830933 +Author: Dinesh Israni +Date: Wed Sep 25 15:45:22 2019 -0700 + + Revert "Update travis for 2.3 branch" + + This reverts commit db3ec66dba91bc6ca150ce900953c093b9a52014. + +commit 7b52a5e7fa4573b48a1859d16ba4f36c3b4bb821 +Author: Aditya Dani +Date: Tue Sep 24 16:25:50 2019 -0700 + + Check the scale factor of the application after scaling it down. + +commit db3ec66dba91bc6ca150ce900953c093b9a52014 +Author: Dinesh Israni +Date: Wed Jan 30 19:23:58 2019 -0800 + + Update travis for 2.3 branch + +commit 252f6145c4a665c062bb846c152a68d6d8e4b196 +Author: Dinesh Israni +Date: Tue Sep 24 18:12:38 2019 -0700 + + Fix race in migration schedule integration test + + The prune for migrations might still be in progress, so add retries + +commit 8a05f413d630ac7ee240ddd4d7f17b38f6685245 +Author: Rohit-PX +Date: Mon Sep 9 17:42:59 2019 -0700 + + Add migration test with startapp flag set to false. 
+
+    Signed-off-by: Rohit-PX
+
+commit e7a053a135d33a809aef2ce5bf1cbad2a940379e
+Author: Ram
+Date:   Mon Sep 23 23:54:49 2019 +0530
+
+    update UT's to additionally check for volume count for snapRestore
+
+    Signed-off-by: Ram
+
+commit 75e1685a38c35f699f148ce523b9f9bb9b25f331
+Author: Ram
+Date:   Mon Sep 23 23:54:15 2019 +0530
+
+    vendor update portworx/sched-ops
+
+    Signed-off-by: Ram
+
+commit 030eba580b3244d9078c0aa0b9f4c5b804860b1d
+Author: Ram
+Date:   Mon Sep 16 20:52:30 2019 +0530
+
+    Correct volume count for storkctl snaprestore output
+    - remove unnecessary logs
+    Signed-off-by: Ram
+
+commit c27038598439486e23f6ca7bba3b82876e82e55e
+Author: Dinesh Israni
+Date:   Fri Sep 20 15:57:10 2019 -0700
+
+    Don't migrate default rbac objects
+
+commit 01815b26d3054175803c6d84ffc747b90866abf8
+Author: Dinesh Israni
+Date:   Thu Sep 19 17:44:54 2019 -0700
+
+    Retry migrations for Unauthorized errors
+
+    The kube apiserver can sometimes return Unauthorized when running on the cloud
+    if there are temporary auth errors
+
+commit ae892f9156bdbb5bad9c96b62f7684f0fd1b9660
+Author: Dinesh Israni
+Date:   Fri Sep 20 16:14:43 2019 -0700
+
+    Update the number of workers to 10 for each controller
+
+    Will make it configurable for each controller in the future
+
+commit 1d6146f1317ff5ab05f2773dccad82299cbbda7d
+Author: Dinesh Israni
+Date:   Thu Sep 19 11:05:44 2019 -0700
+
+    Vendor update for snapshotter
+
+    Adds cache when listing volumesnapshotdata instead of reading from
+    kube apiserver every time
+
+commit fee9c8f36b864053218d92a1bb95fb701508acd7
+Author: Ram
+Date:   Sat Sep 21 09:56:52 2019 +0530
+
+    detect snapshot type while cleaning up restore objects
+    in volumesnapshotrestore
+
+    Signed-off-by: Ram
+
+commit a3d5ce729068f35dc869692b7a7568fb40544d22
+Author: Ram
+Date:   Fri Sep 20 23:32:01 2019 +0530
+
+    update torpedo api changes to integration-test
+
+    Signed-off-by: Ram
+
+commit aa7bd73051250f5fb0ad2898edea339d2493a474
+Author: Ram
+Date:   Fri Sep 20 05:09:11 2019 -0700
+
+    Vendor update torpedo
+
+    Signed-off-by: Ram
+
+commit 787a5ed469c69314befd7dcd9c7c8a45269cefb4
+Author: Ram
+Date:   Fri Sep 20 17:11:32 2019 +0530
+
+    replace maps for restore volumeinfo in crd
+    - use pvc,namespace field to extract volume information
+    - clean snapshot in-place restore for each volume
+    - show restore status for each volume
+
+    Signed-off-by: Ram
+
+commit 8ede2852f88dc462e98afc0ed9852c9c977b8c84
+Author: Ram
+Date:   Fri Sep 20 01:08:39 2019 +0530
+
+    store pvc name and namespace mapping instead of whole pvcspec
+
+    Signed-off-by: Ram
+
+commit 4fa4511244de4ed413160adf9022510cbf031d68
+Author: Ram
+Date:   Fri Sep 20 01:07:38 2019 +0530
+
+    add detail volume info snaprestore crds
+
+    Signed-off-by: Ram
+
+commit c9e7e76b061d373818e077b73dd95f513744ae37
+Author: Grant Griffiths
+Date:   Fri Sep 20 15:36:14 2019 -0700
+
+    Check ownership of VolumeAttachment before deleting
+
+    Signed-off-by: Grant Griffiths
+
+commit 9e044d720820005da7289c035b674b3ebe11bb82
+Author: Rohit-PX
+Date:   Fri Sep 20 14:45:34 2019 -0700
+
+    Revert changes made for testing.
+
+    Signed-off-by: Rohit-PX
+
+commit 14e782fc2c7049e7255cdcbcc995bde4e9d4e72c
+Author: Rohit-PX
+Date:   Fri Sep 20 14:25:43 2019 -0700
+
+    Use scale factor from destination cluster to scale source in failback.
+
+    Signed-off-by: Rohit-PX
+
+commit 302785e0af3836fc691b0497c71492940422c902
+Author: Rohit-PX
+Date:   Thu Sep 19 13:27:14 2019 -0700
+
+    Set old scale factor in failback test.
+
+    Signed-off-by: Rohit-PX
+
+commit 545fc4666da109ca557a9ab3e97691f5d895daea
+Author: Grant Griffiths
+Date:   Fri Sep 20 12:56:20 2019 -0700
+
+    Delete VolumeAttachments for down node or pod in unknown state
+
+    Signed-off-by: Grant Griffiths
+
+commit 01ec37438733c3fbfeb211301567fc168d26549a
+Author: Grant Griffiths
+Date:   Fri Sep 20 11:38:20 2019 -0700
+
+    Update vendor
+
+    Signed-off-by: Grant Griffiths
+
+commit 3a9a677b79598b0743789b65410c87eeaa7b14bf
+Author: Luis Pabón
+Date:   Wed Sep 18 17:49:31 2019 -0700
+
+    Fix CSI unstructured object access
+
+commit a6c426c7187b0b74e0f880cd084734eb9ac15728
+Author: Rohit-PX
+Date:   Thu Sep 19 22:38:21 2019 -0700
+
+    Reset config to source after backup sync controller test.
+
+    Signed-off-by: Rohit-PX
+
+commit f2d0a9436341ec357fcb74812ddcf8f53ff097da
+Author: Rohit-PX
+Date:   Thu Sep 19 14:02:30 2019 -0700
+
+    Order snapshot tests to run before migration tests.
+
+    Signed-off-by: Rohit-PX
+
+commit 4757bf0faa5ca18a825a2191a23259740b0d64e8
+Author: Dinesh Israni
+Date:   Tue Sep 17 23:29:11 2019 -0700
+
+    Create annotations for app during migration if it doesn't exist
+
+    Also remove noisy log message
+
+commit 4c87ec9e5c7d82ceb757fbc0d8fdecdff32d30bf
+Author: Rohit-PX
+Date:   Tue Sep 17 11:27:59 2019 -0700
+
+    Use cassandra instead of mysql for clusterdomain migration.
+
+    Signed-off-by: Rohit-PX
+
+commit 4241f55a4914edabdb06034687faf4d46afee503
+Author: Rohit-PX
+Date:   Thu Sep 12 17:32:39 2019 -0700
+
+    Allow running of individual tests.
+
+    Signed-off-by: Rohit-PX
+
+commit be7040a0e3c8efdd52cce0d99ba3a432740387c8
+Author: Rohit-PX
+Date:   Mon Sep 16 17:56:13 2019 -0700
+
+    Use name from the existing secret object as new obj might be nil.
+
+    Signed-off-by: Rohit-PX
+
+commit 0d9a17c9366915f0ff7da31fdc43c84dfad01146
+Author: Dinesh Israni
+Date:   Thu Sep 12 17:59:32 2019 -0700
+
+    Update API group for stork-scheduler permission for replicaset
+
+commit a56395cd9c555c930029868a9a22fdc618967356
+Author: Ram
+Date:   Wed Sep 11 12:26:21 2019 +0530
+
+    Don't add duplicate entry for already present imagepullSecrets
+
+    Signed-off-by: Ram
+
+commit 7a2c14100a8b0049bc65a7611561613f1b657977
+Author: Ram
+Date:   Tue Sep 10 16:45:42 2019 +0530
+
+    Migrate image pull secrets associated with default service account
+
+    Signed-off-by: Ram
+
+commit d7624f2f4e0d1da8cf229ef4327ffe1bb7aaf223
+Author: Dinesh Israni
+Date:   Wed Sep 11 17:02:45 2019 -0700
+
+    Don't set predicates or priorities for scheduler
+
+    It picks up the defaults from k8s 1.10 onwards
+
+commit dad6473aab94a2b037e3ce4d70b32e3df4337c51
+Author: Dinesh Israni
+Date:   Wed Sep 4 15:10:10 2019 -0700
+
+    Create namespaces for restore if they don't already exist
+
+commit 69c001906756bf7dc5e34e22463e1fa509ade550
+Author: Dinesh Israni
+Date:   Wed Sep 4 15:02:00 2019 -0700
+
+    Remove owner ref from synced backups
+
+    This was causing backups to be deleted by the k8s because the owner,
+    which is the backup schedule, won't be present on remote cluster
+
+commit c46f6dfa27d3a613deccb9e7bbfb7c3a5cf3421e
+Author: Dinesh Israni
+Date:   Wed Aug 14 22:12:39 2019 -0700
+
+    Add check in monitor to skip duplicate offline nodes with same IP
+
+    It is possible for the storage driver to return information for 2 nodes with the
+    same IP if a new node was re-using the IP from a node that was removed.
+    In that case the health monitor would incorrectly determine that the storage
+    driver was offline on the node.
+
+    This change removes offline nodes with duplicate IPs
+
+commit e5ea8255801a5852cee711ee7c668902689a438b
+Author: Dinesh Israni
+Date:   Wed Aug 28 19:27:58 2019 -0700
+
+    Convert reclaim policy to string before setting in unstructured object
+
+    Can't set custom types using SetNestedField
+
+commit c39cbb36199212065a9cb5b8d13391ca5a0b6488
+Author: Dinesh Israni
+Date:   Tue Sep 3 15:49:46 2019 -0700
+
+    Create namespace mapping map in application restore if not present
+
+commit 54a430952512a9832831723f211cb1fe3bcdb964
+Author: Rohit-PX
+Date:   Thu Aug 22 18:45:43 2019 -0700
+
+    Integration test for backup sync controller.
+
+    Signed-off-by: Rohit-PX
+
+commit 4a0e6d84471783bdca391ab887cc8c24eeeec60c
+Author: Aditya Dani
+Date:   Tue Aug 27 12:57:48 2019 -0700
+
+    Move the ClusterDomainsStatus.Info check within the retry task.
+
+    - Stork will create the CDS object but it might not update the Info object
+      until it gets it back from the storage provider.
+
+commit 3224f511a5932974a793d9df5637eb122cdb6252
+Author: Rohit-PX
+Date:   Mon Aug 26 17:57:54 2019 -0700
+
+    Increase wait time when waiting for cluster domain list.
+
+    Signed-off-by: Rohit-PX
+
+commit fce3389cc68a1893ac96daac6358bf43a4978ea6
+Author: Aditya Dani
+Date:   Sun Aug 25 23:13:10 2019 -0700
+
+    Cluster Domain Integration Tests
+
+    - Add a task wait and retry over ListClusterDomainStatus call
+      before concluding that the tests are not running in cluster domains
+      environment.
+
+commit faea830df8aac52591087439e01f1242cffb0d45
+Author: Dinesh Israni
+Date:   Sat Aug 24 22:21:13 2019 -0700
+
+    Update go version for travis build
+
+commit ca799b78d7fce4825107a78b6f11918dd3240cf3
+Author: Rohit-PX
+Date:   Fri Aug 23 16:10:49 2019 -0700
+
+    Add quotes to the enable cluster domain flag.
+
+    Signed-off-by: Rohit-PX
+
+commit bc0e9926fd84d65fe0cfc91e423a22f30c419dba
+Author: Rohit-PX
+Date:   Thu Aug 22 18:49:08 2019 -0700
+
+    Fix flag for enabling cluster domain tests in integration tests.
+
+    Signed-off-by: Rohit-PX
+
+commit 53099ddfc07741866ff1580028bf5bf9c3ef1541
+Author: Rohit-PX
+Date:   Tue Aug 20 16:33:21 2019 -0700
+
+    Add 'nil' param to ListNamespaces sched-ops method invocation.
+
+    Signed-off-by: Rohit-PX
+
+commit 65127137a6ecbeefcd5dc355ff0b261b3e4d5017
+Author: Rohit-PX
+Date:   Tue Aug 20 16:17:00 2019 -0700
+
+    Vendor Updates.
+
+    Signed-off-by: Rohit-PX
+
+commit 9a8e7f56dd1813d1e195a211f69c92b32cc00bb8
+Author: Rohit-PX
+Date:   Mon Aug 19 23:03:18 2019 -0700
+
+    Add storage provisioner flag to stork-test.
+
+    Signed-off-by: Rohit-PX
+
+commit a2450e756afef7868435fd0726e0ae577036f442
+Author: Dinesh Israni
+Date:   Fri Aug 16 15:37:29 2019 -0700
+
+    Save the clone volume names after generating so that we can use them on failure
+
+commit 977657fd2b882139af3aa3a03b57e955a5389d7b
+Author: Dinesh Israni
+Date:   Fri Aug 16 15:36:27 2019 -0700
+
+    [Portworx] Delete created volume clones on failures
+
+    This ensures that all volumes are created together when retried
+
+commit 18ccdd37258bbf0c02a26df1ce387d6b76973938
+Author: Dinesh Israni
+Date:   Tue Aug 13 17:52:16 2019 -0700
+
+    Remove check for error when refreshing discovery helper
+
+    The library takes care of the error
+
+commit 961ffa6085e6272afb5365b5caefd816fb976855
+Author: Dinesh Israni
+Date:   Tue Aug 13 17:46:56 2019 -0700
+
+    Replace collections helper with modifying objects directly
+
+commit 49e5c63fcaa6477679ac23786744a3a7c4292a5b
+Author: Dinesh Israni
+Date:   Tue Aug 13 16:42:12 2019 -0700
+
+    Vendor update
+
+    Update k8s packages to 1.11.9
+
+commit 974e7a2865b7cb944d9d2da3e874751fb2d84b77
+Author: Rohit-PX
+Date:   Fri Aug 16 17:05:52 2019 -0700
+
+    Integration test for label selector.
+
+    Signed-off-by: Rohit-PX
+
+commit 63408fd629ad1bdab069c13b43d7d7e20919d843
+Author: Ram
+Date:   Wed Aug 7 22:38:46 2019 +0530
+
+    Address review comments
+
+    Signed-off-by: Ram
+
+commit 391e8e24dfb7558f45a1e61395b18e9ac709b977
+Author: Ram
+Date:   Wed Aug 7 01:40:44 2019 +0530
+
+    Cleanup restore objects upon CRD delete
+    - fix restore fails when haUpdate fails for restore vol
+    - make task add more unique
+
+    Signed-off-by: Ram
+
+commit ddb55b7ad3aaee28308a7f9cb8416f6e4053554e
+Author: Dinesh Israni
+Date:   Fri Aug 16 23:24:45 2019 -0700
+
+    Update cassandra version in integration test
+
+    The older version has a bug which could lead to an empty commit log file
+    which causes issues during restart
+
+commit 8dff2de6e1abd01319f8469c1b5910910d4c2aa0
+Author: Dinesh Israni
+Date:   Thu Aug 15 13:07:07 2019 -0700
+
+    Vendor update for torpedo
+
+    Fixes test issue when creating rules
+
+commit 2e863970a599d0380243f6bce2cca972c26776d6
+Author: Rohit-PX
+Date:   Wed Aug 14 17:29:38 2019 -0700
+
+    Integration tests for pre/post failing rules and spec files.
+
+    Signed-off-by: Rohit-PX
+
+commit 6ca355dfdc77ae87a56d3cea78b44bf169110a0b
+Author: Dinesh Israni
+Date:   Sat Aug 10 13:56:23 2019 -0700
+
+    Update version for Deployment and Statefulset
+
+commit 25a49dfd0136f64a0bca51bbf917fa07bdf6bf47
+Author: Dinesh Israni
+Date:   Sat Aug 10 13:55:49 2019 -0700
+
+    Vendor update for sched-ops and torpedo
+
+commit 168aab918faad8e4c1f6ef78b514ca81ed7c8f75
+Author: Dinesh Israni
+Date:   Sat Aug 10 13:42:23 2019 -0700
+
+    Pass in correct namespace when running pre/post exec rules for clone
+
+commit cf051db8390340eadcfa4d34426f0432ce033176
+Author: Dinesh Israni
+Date:   Thu Aug 8 23:31:33 2019 -0700
+
+    Integration tests for application clone with rules and label selectors
+
+commit fcf8665149b31784cf2927d77f3b9dbb019469aa
+Author: Rohit-PX
+Date:   Sun Aug 11 18:23:57 2019 -0700
+
+    Integration test for application backup with pre/post exec and missing rule.
+
+    Signed-off-by: Rohit-PX
+
+commit 4df16d5f9f05fa34846c06af7a8864832c4f26c9
+Author: Ram
+Date:   Mon Aug 12 19:20:14 2019 +0530
+
+    Disable CS inplace restore feature
+
+    Signed-off-by: Ram
+
+commit cf19c5cf197d58ecae43319fda71c138da802019
+Author: Ram
+Date:   Wed Aug 7 22:42:21 2019 +0530
+
+    Disable CSRestore integration tests
+
+    Signed-off-by: Ram
+
+commit 7c56efd2a9c3887f54c109e4e6ca3d09b8aa2972
+Author: Dinesh Israni
+Date:   Tue Aug 6 17:48:29 2019 -0700
+
+    Don't return error from schedule controllers if policy is invalid
+
+commit e73bbbf46645371e9a1d77da983369644c06d9e2
+Author: Dinesh Israni
+Date:   Tue Aug 6 17:25:05 2019 -0700
+
+    Update for application backup and schedule
+
+    - For schedule set retain policy to delete by default
+    - When deleting backup ignore NotFound error for backup location
+
+commit fa902c4fe37d48f8461269b7ad970bf54a3e6c67
+Author: Dinesh Israni
+Date:   Tue Aug 6 17:10:40 2019 -0700
+
+    Add integration tests for application backup schedule
+
+commit f2be98dff14d40d0847da46d2e01554d03e006a5
+Author: Dinesh Israni
+Date:   Mon Aug 5 16:44:31 2019 -0700
+
+    [Portworx] Add version checks for application backup and snap restore
+
+commit 66c87147d31988a21d599f272232831d63a818b2
+Author: Dinesh Israni
+Date:   Mon Aug 5 16:36:43 2019 -0700
+
+    storkctl subcommands for ApplicationBackupSchedule
+
+commit 755325f0c1e7e4dd3546cde3b3149cfca94ad89d
+Author: Dinesh Israni
+Date:   Mon Aug 5 16:36:24 2019 -0700
+
+    Vendor update for sched-ops
+
+commit 19da95bd7280434afa8455f027ffdd6b6ba7c6ca
+Author: Ram
+Date:   Fri Aug 2 16:14:39 2019 +0530
+
+    Validate snapshot for restore
+
+    signed-off-by: ram
+
+commit 1621151c727ae7a7e13f8956ab544735ca4647fe
+Author: Rohit-PX
+Date:   Tue Jul 30 17:17:58 2019 -0700
+
+    Ability to add environment variables to the stork deployment in integration tests.
+
+    Signed-off-by: Rohit-PX
+
+commit c4177fe98e3c1dea241477fc633e052b1f2feb98
+Author: Ram
+Date:   Wed Jul 31 11:56:08 2019 +0530
+
+    Move snapshotrestore test before migration
+
+    Signed-off-by: Ram
+
+commit 3590d42dd351d03c44240ab8f02094f0198b9dc6
+Author: Rohit-PX
+Date:   Tue Jul 23 13:46:05 2019 -0700
+
+    Comment tests to test out the job.
+
+    Signed-off-by: Rohit-PX
+
+commit a9f90d61a8e0070da79d2f1ebff445382400958e
+Author: Dinesh Israni
+Date:   Fri Jun 14 15:29:38 2019 -0700
+
+    Add field in BackupLocation to specify if backups should be synced
+
+commit f9424c1e5a27b843b523db3ba8bd4c93185d416a
+Author: Dinesh Israni
+Date:   Wed Jun 12 15:06:36 2019 -0700
+
+    Add TriggerTimestamp to ApplicationBackup
+
+    Useful to keep creation time when backup objects are restored to another cluster
+    Also setting the default namespace mappings during restore if none are provided
+
+commit 3d3b2fbdc1e4f1d0afcd94a5ac3e5bb33bd8c934
+Author: Dinesh Israni
+Date:   Thu Jun 6 13:10:59 2019 -0700
+
+    Add controller to sync backups
+
+    - Scans all the backupLocation for backups
+    - If a backup from the backupLocation doesn't exist on a cluster create one with the
+      format - in the namespace. Scheduled backups
+      retain their original name since they already have a timestamp in the name
+    - The ReclaimPolicy is set to Retain for the synced objects so that deleting
+      them doesn't delete the backup from the BackupLocation
+
+commit 70e52e0195ff52371970ed6f5ef538b7b9bc684e
+Author: Dinesh Israni
+Date:   Mon Jun 3 18:19:20 2019 -0700
+
+    Destroy all contexts before waiting in integration test cleanup
+
+commit ac984c2d98ec36cd66fb5bbeaa2b3697ffa109e7
+Author: Dinesh Israni
+Date:   Wed Jul 24 16:48:53 2019 -0700
+
+    Add unit tests for health monitor
+
+commit 4b7941828e0afce2d9996979d6a0fd6ca080f3db
+Author: Dinesh Israni
+Date:   Wed Jul 24 16:44:37 2019 -0700
+
+    [Monitor] Delete pods from node if it is in any phase
+
+    A pod could be scheduled and the storage driver on the
+    node could go offline after that causing the pod to be
+    stuck in Pending state
+
+commit 0cfb3632eb514bd7967e9255d52d2cf200f5193f
+Author: Dinesh Israni
+Date:   Wed Jul 17 14:25:36 2019 -0700
+
+    Add API to resourcecollector package to delete objects
+
+    This can be used when resources need to be deleted before creating them again.
+    Deletion needs to be done for all the objects beforehand because there could be
+    resources that have dependencies on other resources. For example, a PVC can't be
+    deleted if a pod is using it.
+
+commit c6cd98371924068bf34158131fc72d784a21886b
+Author: Ram
+Date:   Mon Jul 29 12:24:03 2019 +0530
+
+    Vendor updates torpedo
+
+    Signed-off-by: Ram
+
+commit d9d40ac13184a6dcbbe37b341561d9cd4fcd8266
+Author: Ram
+Date:   Fri Jul 26 23:35:27 2019 +0530
+
+    Use addTask instead of Schedule for creating group snap
+    - correct api WaitOnDriverUpOnNode api
+
+    Signed-off-by: Ram
+
+commit 68963cd193e39d076030e55976871dfb34fd11ab
+Author: Ram
+Date:   Thu Jul 25 00:16:17 2019 +0530
+
+    Add integration test for snapshot restore
+    - grouplocalsnapshot restore
+    - cloudsnapshot restore
+    - addressed review comment
+
+    Signed-off-by: Ram
+
+commit 3b2fa929da49d3f1d5abc7b22815fb1ab439b47a
+Author: Ram
+Date:   Fri Jun 28 00:40:30 2019 +0530
+
+    Basic integration test for snapshot in place restore
+    - Vendor changes for inplace restore integration tests
+    - Fix namespace for inplacerestore crd
+    - Vendor update for sched-ops, torpedo
+
+    Signed-off-by: Ram
+
+commit 70f6a019ae327ec8f9cd6b29bb7377699ee6d56e
+Author: Ram
+Date:   Mon Jul 29 11:38:43 2019 +0530
+
+    Address review comments
+
+    Signed-off-by: Ram
+
+commit 9645ce32ed61bd67d957a191a837f9e324fae087
+Author: Ram
+Date:   Fri Jul 26 01:37:44 2019 +0530
+
+    Add retry for voldriver_restore api
+    - send proper rs while ha_update
+    - handle reconciler failure condition properly
+    - use updatePVC sets
+
+    Signed-off-by: Ram
+
+commit bb657322388b8e709571842c1066760a14bbdaf0
+Author: Rohit-PX
+Date:   Tue Jul 9 17:15:43 2019 -0700
+
+    Update Gopkg.toml for dep failures.
+
+    Signed-off-by: Rohit-PX
+
+commit e9cfbbb1573f0fdd244467c40ae153ce562bf207
+Author: Rohit-PX
+Date:   Wed Jun 12 12:09:47 2019 -0700
+
+    Integration test for backup/restore. Replace policy - Delete.
+
+    Signed-off-by: Rohit-PX
+
+commit d9deefc8fdeea25008f7f61f5a1921e5cf0dc3f6
+Author: Dinesh Israni
+Date:   Tue Jul 16 14:41:36 2019 -0700
+
+    Add an integration test for application clone
+
+commit 65b79302f45bd7e869a06754753e016e516bab0c
+Author: Dinesh Israni
+Date:   Tue Jul 16 14:40:29 2019 -0700
+
+    ApplicationClone: Create destination namespace if it doesn't exist
+
+commit 883352abc1f8439d05aaa52c591c43e3f9c26da8
+Author: Dinesh Israni
+Date:   Mon Jul 15 16:07:50 2019 -0700
+
+    Vendor update for sched-ops and torpedo
+
+commit 5b48f8e27616baa6bb5d8376ac0b6a8b1d127d06
+Author: Dinesh Israni
+Date:   Fri Jul 26 13:36:15 2019 -0700
+
+    Update permissions for stork-role
+
+    Needed for application restore
+
+commit 5c904f33a5733fe62258fde9a3a25c8ed6129df9
+Author: Dinesh Israni
+Date:   Mon Jul 22 15:07:33 2019 -0700
+
+    [Portworx] Add labels to cloudsnap for application backups
+
+commit ebb387d43d4e429462a02b29b3d735c042d21aa3
+Author: Dinesh Israni
+Date:   Tue May 21 14:57:11 2019 -0700
+
+    Add controller for application backup schedule
+
+commit 987c6b555c31462f4201d04ab558fc0ec059a2d2
+Author: Dinesh Israni
+Date:   Mon May 20 17:08:05 2019 -0700
+
+    Add CRDs for ApplicationBackupSchedule
+
+commit bad6ba7adb6a063e4d1d952eb2c45fdfe5f9793d
+Author: Dinesh Israni
+Date:   Thu Jul 25 13:41:10 2019 -0700
+
+    Fix duplicate imports
+
+    Was causing the latest staticcheck to fail
+
+commit cb94241e576eab5040957b31e7848ffba218f907
+Author: Dinesh Israni
+Date:   Mon Jul 22 19:16:38 2019 -0700
+
+    Validate name and namespace when generating clusterpair
+
+commit 4a04e421eaf8c1f99e3620aac35985611b6da7a3
+Author: Luis Pabón
+Date:   Fri Jul 12 14:38:05 2019 -0700
+
+    Update vendor
+
+commit 9cce69a03693449c3b495b2a46d86be6547e8e0f
+Author: Luis Pabón
+Date:   Mon May 13 13:59:07 2019 -0700
+
+    CSI Support
+
+commit e8a3b7847fab86de6a2d075eac7a18d6db8948b8
+Author: Dinesh Israni
+Date:   Wed Jul 17 18:37:04 2019 -0700
+
+    Add -t to test2json to add timestamps
+
+commit 00d9152fefbf43ec5fe7988e902a31b38dff103f
+Author: Dinesh Israni
+Date:   Thu Jul 11 15:02:27 2019 -0700
+
+    Store snapshot schedule info in annotations instead of labels
+
+    Values in labels have a smaller length limit and the info should be an
+    annotation anyways
+
+    Fixes #415
+
+commit cd9f704867586af2b7e0bf65b9af303536d952a8
+Author: Dinesh Israni
+Date:   Wed Jul 10 15:46:29 2019 -0700
+
+    Allow users to specify annotation in pod if only local node should be preferred
+
+commit 00a97d76a7c7ca4e532cb26689e7378453c3ae9b
+Author: Dinesh Israni
+Date:   Mon Jul 1 13:00:58 2019 +0400
+
+    When uploading objects don't close writer async
+
+    It can return errors which should be reported back
+
+commit 9181c724b6d73e719359e1b286afca0aaf54247c
+Author: Ram
+Date:   Thu Jun 27 14:27:00 2019 +0530
+
+    Fix correct restore volume name in log
+    - add volumeId in info log
+
+    Signed-off-by: Ram
+
+commit 274376b5342560c9ae1279e11e8035cbe2172e18
+Author: Ram
+Date:   Thu Jun 13 22:53:59 2019 +0530
+
+    Review comments
+    - handle repl update of volume
+    - delete orphaned objects once restore completes
+    - remove restoreType from CRD
+    - fix unique restore TaskID
+
+    Signed-off-by: Ram
+
+commit 5588558a2c858366130fc13bb6947e2090668277
+Author: Ram
+Date:   Wed Jun 12 22:30:15 2019 +0530
+
+    Rearrange snapshot restore controller
+    - prepare restore objects before in place restore
+    - check status of snapshots restore objects
+    - keep restorevol map and pvc list with snapshot restore
+      CRD
+
+    Signed-off-by: Ram
+
+commit 98ca860a8abade8d55ab512ed3b209ed3861fc03
+Author: Ram
+Date:   Wed Jun 12 01:14:04 2019 +0530
+
+    Add preparation stage for in-place snapshot restore
+
+    Signed-off-by: Ram
+
+commit 89a2d7d08a7d7196f4bc3cbccd9c887a756acfb0
+Author: Dinesh Israni
+Date:   Thu Jun 20 10:33:19 2019 +0400
+
+    Don't skip delete for PV and PVCs when replacing
+
+commit 8587c17d87c3f9e31daeccdb279faf7f05103dd5
+Author: Harsh Desai
+Date:   Fri May 31 17:04:50 2019 -0700
+
+    [Portworx] Fail Portworx driver init if service doesn't have required ports
+
+    Signed-off-by: Harsh Desai
+
+commit 28ff69936f09b5de52e308deae902c1035af11b8
+Author: Dinesh Israni
+Date:   Thu Jun 13 16:49:48 2019 -0700
+
+    Create annotations to store migration replicas if it doesn't exist
+
+commit 541a0f8afc1bb50d71c34053f60846e8203595a5
+Author: Dinesh Israni
+Date:   Thu Jun 13 14:33:28 2019 -0700
+
+    Ignore errors from discovery helper for aggregate APIs
+
+commit c4b4526c86e825fedbf50fc3b3835b90553a6d01
+Author: Dinesh Israni
+Date:   Thu Jun 13 16:38:37 2019 -0700
+
+    Add support to collect Template objects
+
+commit 0300b1ba34ec8a393fe0a0bb8e5ba5459da50831
+Author: Dinesh Israni
+Date:   Tue May 28 19:55:16 2019 -0700
+
+    Ignore error in storkctl if deploymentconfig type isn't found
+
+commit fdeaf4557546fa0222fa779a001033e1ec6706f0
+Author: Piyush Nimbalkar
+Date:   Wed Jun 12 16:22:06 2019 -0700
+
+    Remove storage cluster CRD and controller
+
+    Signed-off-by: Piyush Nimbalkar
+
+commit 603f5f576fb2a6b84f0c9afd1399802c538f6cf4
+Author: Dinesh Israni
+Date:   Mon Jun 10 17:17:27 2019 -0700
+
+    storkctl subcommands for backuplocation
+
+commit 1d50a5c6ac67372be4a3dd7f68031cc6c78b6ae2
+Author: Dinesh Israni
+Date:   Mon Jun 10 17:23:39 2019 -0700
+
+    Vendor update for sched-ops
+
+commit a302011186a76ef7c17b8d7176b37370ffd69b1c
+Author: Dinesh Israni
+Date:   Thu Jun 6 15:58:23 2019 -0700
+
+    storkctl subcommands for application clone
+
+commit 32ee7104f6da915108a7ab9ce87a1044f53f0f18
+Author: Dinesh Israni
+Date:   Thu Jun 6 14:31:27 2019 -0700
+
+    storkctl subcommands for applicationbackup and applicationrestore
+
+commit 4e47e815ee834d6f6c1624aa6494f7394f0c7462
+Author: Ram
+Date:   Mon Jun 10 23:27:14 2019 +0530
+
+    sched-ops vendor update
+
+    Signed-off-by: Ram
+
+commit 8a907b88c09fb20c02dc4cb40f952f5b8fbefee2
+Author: Ram
+Date:   Tue Jun 4 15:59:28 2019 +0530
+
+    Storkctl support for in-place restore
+
+    - Pretty print snapshot-restore output
+    - Add UT's for storkctl snapshotRestore
+    - Review comments
+
+    Signed-off-by: Ram
+
+commit 0525e3e34e1917abefb01eeae7df412bd9ab6956
+Author: Ram
+Date:   Tue Jun 4 14:47:04 2019 +0530
+
+    Add unit test for restore check in extender_test
+    - adjust ut's to create pvc
+    - address review comments
+
+    Signed-off-by: Ram
+
+commit 326292106f8a84a9e8ba237dfb0ede2392d51000
+Author: Ram
+Date:   Fri May 31 19:18:51 2019 +0530
+
+    Add stork scheduler check for in-place restore
+
+    - address review comments
+    - nil check for pvcclaim
+
+    Signed-off-by: Ram
+
+commit 9641642969ca2aa7bdd446680efbc1c97187dddd
+Author: Ram
+Date:   Thu May 2 01:18:26 2019 -0700
+
+    Add controller for In-place snapshot restore
+    - Fix go-lint errors
+    - Review comments
+    - Generated files for changed volumesnapshotCRD's
+    - Restore snapshot where pvc is in use by pods
+    - Move controller specific logic from portworx driver to snapshot restore
+      controller
+    - Generated files by codegen
+    - Review comments
+
+    Signed-off-by: Ram
+
+commit 88c5d54ec2676d38c90fbfa3d8dd66ce2a451a70
+Author: Ram
+Date:   Thu May 2 01:16:32 2019 -0700
+
+    Add CRD for In-place volume snapshot restore
+    - codegen generated files
+    - Add controller for In-place snapshot restore
+    - Add support for local groupsnapshot in-place restore
+    - Fix go-lint errors
+    - Review comments
+    - Check restore status before calling driver's snapshot restore
+    - codegen generated files
+
+    Signed-off-by: Ram
+
+commit ee02a051e0d487339d09d3acaf495f4971d9f1ed
+Author: Dinesh Israni
+Date:   Thu May 30 20:44:35 2019 -0700
+
+    Update staticcheck for integration test
+
+    It throws an error when giving the package name for some reason now
+
+commit 5c7a76edb581ea6031513e78e60817d1178a8488
+Author: Dinesh Israni
+Date:   Wed May 29 22:03:14 2019 -0700
+
+    Fail migrations if local domain is inactive
+
+commit ec80b4110e13e59d54e210240ed8a35b6f144027
+Author: Dinesh Israni
+Date:   Thu May 30 20:04:04 2019 -0700
+
+    Update permissions for all stork resources
+
+commit 988a58993dafc844f051fb7d2dd7f29632a617ad
+Author: Dinesh Israni
+Date:   Wed May 29 22:04:11 2019 -0700
+
+    Update migration behavior for PVs
+
+    - Set reclaim policy to Retain if volumes aren't being migrated
+    - Update PV if it already exists instead of deleting and creating
+
+commit 3633b8d73a47ffa3d4bc0ca84349d6a6d4a13b79
+Author: Dinesh Israni
+Date:   Wed May 29 22:03:34 2019 -0700
+
+    Portworx: Return empty clusterdomain list if not set
+
+commit 31f54fd09fa55e771a677a1d1eec825dfd5febbd
+Author: Aditya Dani
+Date:   Wed May 29 17:43:05 2019 -0700
+
+    Portworx ClusterDomainsStatus: Do not fail on volume enumerate errors.
+
+    - Add a new state SyncStatusUnknown when the driver fails to fetch the sync status.
+
+commit 12554e96b5f3ec6b4c0d0aac93726643b163e7b4
+Author: Aditya Dani
+Date:   Tue May 28 18:41:22 2019 -0700
+
+    integration test: Use remote cluster to activate/deactivate domains
+
+commit d1e467829e95c23c2d55fca861bd51300061159b
+Author: Dinesh Israni
+Date:   Fri May 3 22:19:33 2019 -0700
+
+    Fix type for annotation to not collect resources
+
+commit a80f31e6c39624f529102ba608e1b32edfb6e2dd
+Author: Dinesh Israni
+Date:   Thu May 2 23:20:59 2019 -0700
+
+    Controller for ApplicationClone CRD
+
+    - ApplicationClone objects are only allowed to be created in the admin namespace
+      for now
+    - First the volumes for all the PVC are cloned
+    - Then the resources are copied from the source to destination namespace
+    - ClusterRole is not copied since it is a cluster scoped object
+    - ClusterRoleBinding is merged to have same binding for both namespaces
+
+commit 6f66e799199af79ee0dbb796475c62cf79487be9
+Author: Dinesh Israni
+Date:   Thu May 2 23:18:29 2019 -0700
+
+    Add interface in volume driver to Clone Volumes
+
+    Also added implementation for portworx driver
+
+commit d8f7f2af82fb40354341887a154daf748728f762
+Author: Dinesh Israni
+Date:   Fri May 17 18:42:57 2019 -0700
+
+    Add package to encrypt/decrypt using AES
+
+commit 004a47fbf98625b528582f96f4320e1ba0a11f52
+Author: Dinesh Israni
+Date:   Fri May 17 18:41:15 2019 -0700
+
+    Update backup and restore controllers to use encryption key
+
+    EncryptionKey is picked up from the BackupLocation object
+
+commit 30b95a6a02f38d7624827b395df46acd4ed86096
+Author: Dinesh Israni
+Date:   Wed May 1 15:28:49 2019 -0700
+
+    Vendor update for new dependencies
+
+commit 29546a4b5dc55003d1943d70f79dea43ad61cac1
+Author: Dinesh Israni
+Date:   Thu May 9 14:34:34 2019 -0700
+
+    Print stork image with `storkctl version` command
+
+commit 43128581471a0f81efdc0c683477d0d68701157b
+Author: Dinesh Israni
+Date:   Thu May 9 14:33:27 2019 -0700
+
+    Update stork parameters
+
+    Don't print defaults, it is printed automatically
+    Set kube-system as the default admin namespace
+
+commit d4356d08d62c742e11be714f512c6d61aa7d0d89
+Author: Dinesh Israni
+Date:   Fri May 3 15:13:44 2019 -0700
+
+    Add controllers for application backup and restore
+
+    - Backup and restore is triggered for the volume first followed by the resources
+    - The location of the backup is specified by a BackupLocation
+    - The volume driver stores the volume backup in its format
+    - The resources are stored under ///
+    - This path is stored in the backup object
+    - An ApplicationRestore object needs to refer to an ApplicationBackup that it
+      wants to restore from
+
+commit c32f94c9fc24dd35ffb1dd2a43f786527eea3588
+Author: Dinesh Israni
+Date:   Wed May 1 16:20:58 2019 -0700
+
+    Add interface to volume driver to backup and restore volumes
+
+    Also added implementation for Portworx driver
+
+commit 0a696e689069f7a928cfe585cef65665f5c26bec
+Author: Dinesh Israni
+Date:   Wed May 1 16:23:36 2019 -0700
+
+    Add package for objectstore abstraction
+
+    The API takes a BackupLocation object and returns a bucket handle
+    which can be used for CRUD operations
+
+commit 38b071f7abc0e907b7238a67aa15cf4ce1791ea1
+Author: Aditya Dani
+Date:   Mon May 27 17:37:59 2019 -0700
+
+    Update dep versions for sched-ops and torpedo packages
+
+    Signed-off-by: Aditya Dani
+
+commit 5841c29b9adfecb9f876908b81e8db2e8cc82f34
+Author: Dinesh Israni
+Date:   Mon May 27 10:54:38 2019 -0700
+
+    Add suspend and resume subcommands to storkctl
+
+commit 8a0d552989bb585b61a9266cecc5437ad0de2d85
+Author: Aditya Dani
+Date:   Sun May 26 00:14:07 2019 -0700
+
+    vendor updates for sched-ops, torpedo and talisman
+
+commit 19f0f469468a2578f3c556eb655522c49612d67b
+Author: Aditya Dani
+Date:   Mon May 27 16:01:42 2019 -0700
+
+    Modify storkctl to show ClusterDomain's sync status
+
+    - Use the updated ClusterDomainsStatus CRD and fetch the SyncStatus from it
+    - Modify storkctl UTs
+
+commit 3255eccc342aef3a6fd7c798720cd2004f0d2bcd
+Author: Aditya Dani
+Date:   Sun May 26 14:51:41 2019 -0700
+
+    Portworx: Determine cluster domain's sync status based on volume replicas
+
+    - Modify Portworx's GetClusterDomains API
+    - Enumerate all portworx volumes and based on the current and create replica set
+      determine if a volume is in resync.
+    - Determine a cluster domain's sync status based on resyncing volumes and their nodes.
+
+    Update the ClusterDomainsStatus controller to use the modified CRD.
+
+commit 8338eb362300bb34e4846b976149ceea82c69150
+Author: Aditya Dani
+Date:   Sun May 26 00:06:17 2019 -0700
+
+    Modify ClusterDomainsStatus CRD
+
+    - Remove Active and Inactive lists
+    - Add a ClusterDomainInfo object under Status
+    - Add new SyncStatus that indicates whether a cluster domain is in sync with other
+      cluster domains.
+
+commit c1406b3acc7eab1a49b4b756af72e14985fa4e05
+Author: Ram
+Date:   Mon May 27 23:26:53 2019 +0530
+
+    Add wait options for storkctl create migrations
+
+    Signed-off-by: Ram
+
+commit 1f61136389475fcf5fe195d45934b36b46f07c27
+Author: Ram
+Date:   Fri May 24 22:20:37 2019 +0530
+
+    Add wait poll support for storkctl activate clusterdomain
+
+    Signed-off-by: Ram
+
+commit fe496a5ced7a7a90349b6c5693f6be96629bf669
+Author: Dinesh Israni
+Date:   Fri May 24 19:41:09 2019 -0700
+
+    Print N/A for volumes and resources when not migrating them
+
+commit f2e070eac4fc7dbd7609f8445730453122b507ed
+Author: Dinesh Israni
+Date:   Thu May 23 18:03:23 2019 -0700
+
+    Create some default schedule policies
+
+    Also update storkctl to use a default policy for migration schedule
+
+commit 8dd0ce7ed3c5d1690b88e6fe545bc43c6ece3a16
+Author: Dinesh Israni
+Date:   Wed May 22 16:54:30 2019 -0700
+
+    Optimize migrations for clusterrole
+
+    For clusterrole, get a list of crbs initially and use that to check if migration
+    is required
+
+    Also skip deployer and builder service accounts which are automatically created
+    on OCP
+
+commit 745b5402bd3840a2c9cc1236ef014023c1cc123a
+Author: Harsh Desai
+Date:   Tue May 21 14:42:48 2019 -0700
+
+    [Portworx] add idempotency for local snapshots
+
+    Signed-off-by: Harsh Desai
+
+commit 6824155a70d0af3da694bd3ebef9786506fab041
+Author: Dinesh Israni
+Date:   Wed May 22 15:27:46 2019 -0700
+
+    Suspend migration schedules if the local clusterdomain is inactive
+
+commit 2e7d881098d6810e54572f702db58d0402b37407
+Author: Dinesh Israni
+Date:   Wed May 22 15:11:00 2019 -0700
+
+    Trigger update for cluster domain status when updating domains
+
+commit ee5a514e89ffd7fb9a95cb84352f2de7823ba490
+Author: Dinesh Israni
+Date:   Wed May 22 14:28:18 2019 -0700
+
+    Portworx: Populate local domain in cluster domain status
+
+commit 3b3b88ad83c66e3982e52d991d3988b544f50327
+Author: Dinesh Israni
+Date:   Wed May 22 15:01:17 2019 -0700
+
+    Add local domain to clusterdomain status
+
+    Also updated storkctl to print the local domain
+
+commit dabd39289000cd0627c5d357e97c17e8546549d0
+Author: Dinesh Israni
+Date:   Tue May 14 15:32:34 2019 -0700
+
+    Fix schedule to trigger if next trigger is exactly at current time
+
+    Also added UT
+
+commit 9ae8cbc32804087dde87c78b25b7ac67e402e843
+Author: Dinesh Israni
+Date:   Mon May 20 22:33:58 2019 -0700
+
+    Revert "Use correct spec for extender integration test"
+
+    This reverts commit 27b8690fde2068f3cbbc29ad48c42f7d4f44e61f.
+
+commit 27b8690fde2068f3cbbc29ad48c42f7d4f44e61f
+Author: Dinesh Israni
+Date:   Mon May 20 14:31:42 2019 -0700
+
+    Use correct spec for extender integration test
+
+commit 068f895d4fe93f5ef1b2160f4b247b6d88ac7f1d
+Author: Dinesh Israni
+Date:   Thu May 16 17:50:12 2019 -0700
+
+    Add deploymentconfig to activate migrations for storkctl
+
+commit f6a79a813f095495cc5787392d5f06225797bc61
+Author: Dinesh Israni
+Date:   Sat Apr 20 14:57:37 2019 -0700
+
+    Add application manager package
+
+    This will start the controller for all the application specific operators
+
+commit 35fc3c825d6a0d10f6b06793b6bbc0c936bd476d
+Author: Dinesh Israni
+Date:   Mon Apr 15 18:20:55 2019 -0700
+
+    Add CRDs for BackupLocation
+
+    Used to specify the objectstore where backups can be stored.
+    Location is kept generic enough so as to allow non-objectstore targets to be
+    specified in the future.
+    The config can be provided inline or through a secret.
+
+    Issue #284
+
+commit 64e7b605394a7891ac86ab9e2bb69568750ac4ef
+Author: Dinesh Israni
+Date:   Mon Apr 15 15:17:47 2019 -0700
+
+    Add CRDs for Application Backup and Restore
+
+    Issue #284
+
+commit 96ea49d2be33a9e965d6f34a982ecb285e3d46e6
+Author: Aditya Dani
+Date:   Mon May 13 18:21:28 2019 -0700
+
+    vendor update from openstorage
+
+    - Fix openstorage pkg/grpcserver memory leak when grpc endpoint is incorrect.
+
+commit dbcc8060c420dbe8dc9c94ff9731bf6a1708881b
+Author: Dinesh Israni
+Date:   Mon May 6 14:38:25 2019 -0700
+
+    Add debug package to dump profiles
+
+    SIGUSR1 can be used to dump memory and goroutine info
+    SIGUSR2 can be used to toggle collection of cpuprofile
+
+commit bbf85035f7263ab99d1244aa1b35332a9c6c3ff8
+Author: Dinesh Israni
+Date:   Mon May 13 18:28:46 2019 -0700
+
+    Fix matching of clusterdomain list
+
+commit c5e9722ba34b35729049d28212b333ec377660bd
+Author: Aditya Dani
+Date:   Mon May 13 11:21:08 2019 -0700
+
+    Print an error log when the controller fails to fetch cluster domain info.
+
+    Signed-off-by: Aditya Dani
+
+commit bf4c7b2c7d41d41a80cd1929abe563db21b37ae4
+Author: Rohit-PX
+Date:   Fri May 10 18:20:03 2019 -0700
+
+    Use standard-verbose for gotestsum output.
+
+    Signed-off-by: Rohit-PX
+
+commit a97aef1ccaeee4c8d409a62faf3b7de2b4e79746
+Author: Dinesh Israni
+Date:   Thu May 9 19:25:28 2019 -0700
+
+    Update vendor dependency
+
+commit 2ead79c6fb794609b89520943bdad30194f0bd05
+Author: Dinesh Israni
+Date:   Thu May 9 19:24:46 2019 -0700
+
+    Add support to collect some additional resources
+
+    - Role
+    - RoleBinding
+    - Ingress
+
+    Also deleting loadBalancerIP from service resource
+
+commit 5434fb612d1075a6767a217b1e7eae4279938762
+Author: Rohit-PX
+Date:   Thu May 9 13:29:34 2019 -0700
+
+    Use gotestsum for stork test.
+
+    Signed-off-by: Rohit-PX
+
+commit 2f53393411a0fd14bc1cca4acf2b3f5ffff53004
+Author: Dinesh Israni
+Date:   Wed Apr 24 19:45:35 2019 -0700
+
+    Update google cloud sdk and use python3 in container
+
+    Also update some python libraries
+
+commit 4a52ec8419ceac20e133cfb9d69398a95fde8be4
+Author: Dinesh Israni
+Date:   Tue May 7 14:47:28 2019 -0700
+
+    Add field in migration spec for admin cluster pair
+
+    This can be used to migrate cluster scoped resources if an admin doesn't want to
+    provide access for those to individual users. The admin cluster pair needs to be
+    created in the admin namespace.
+
+commit 17485bdf10df161d0448d9ad17db4c936fbb35fa
+Author: Dinesh Israni
+Date:   Tue May 7 13:33:57 2019 -0700
+
+    Update node info to have storageID and schedulerID
+
+    Also update portworx driver to populate both for the nodes
+
+commit be02a94a680f8a424a242f71d3bf2b1aeed5cbbb
+Author: Dinesh Israni
+Date:   Mon May 6 14:41:17 2019 -0700
+
+    Vendor update for sched-ops
+
+    Fixes a memory leak when watches are re-established
+
+commit e3f9d69395b7cd8d8e7566cfbb6318fbaf8f1b17
+Author: Dinesh Israni
+Date:   Thu May 2 15:58:49 2019 -0700
+
+    Add support to collect some ocp resources
+
+    New supported resources are
+    - DeploymentConfig
+    - ImageStream
+    - Route
+
+commit 66189880a3f16478575144d3ed9c025b21d7a869
+Author: Dinesh Israni
+Date:   Thu May 2 15:58:15 2019 -0700
+
+    Vendor update for DeploymentConfig
+
+commit 286c9dea6e4e54752102c5397c64b0178c5e6653
+Author: Dinesh Israni
+Date:   Wed May 1 17:47:33 2019 -0700
+
+    Vendor update for sched-ops
+
+commit 94dff687c8a48895df3ed1c108304cf983241447
+Author: Dinesh Israni
+Date:   Wed May 1 16:23:10 2019 -0700
+
+    Update watch API in health monitor
+
+commit e32745c8f534a33b61185991bae37288d75c140b
+Author: Dinesh Israni
+Date:   Mon Apr 15 20:01:10 2019 -0700
+
+    Move resource collection logic from migration controller
+
+    * Can be used by other modules to get unstructured objects from a namespace
+      and matching label selectors
+    * Added support to collect clusterrolebindings including users and groups for a
+      namespace
+    * Added merging of clusterrolebindings when applying resources
+
+commit 79c82f809f739aea8645fdc0f1faa7fdfc505aa2
+Author: Dinesh Israni
+Date:   Thu May 2 14:12:18 2019 -0700
+
+    Increase wait time in snapshotschedule test
+
+    Gives it enough time to wait for the trigger and status to be updated
+
+commit a529afe888ac3bf6b6451fc1fe3bb73ea48784f4
+Author: Dinesh Israni
+Date:   Thu Apr 25 16:35:57 2019 -0700
+
+    Upload storkctl from master branch to master path instead of latest
+
+commit 43cb1fd905460b6797e68684c96754cdf3dbcf36
+Author: Tapas Sharma
+Date:   Wed Apr 17 13:03:52 2019 -0700
+
+    Define CRD for application cloning
+    0. This checkin defines the first level CRD for cloning applications
+    1. Added replace policy to the spec
+    2. Added ApplicationClone and ApplicationCloneList to register.go
+    3. Added ResourceInfo and VolumeInfo to the status of clone
+    4. Removed Namespace from the volumeInfo and resourceInfo
+
+    Signed-off-by: Tapas Sharma
+
+commit fd28b56985709e0effa9e297f224089ec1cdebaa
+Author: Dinesh Israni
+Date:   Sat Apr 20 14:58:51 2019 -0700
+
+    Remove the test directory from google cloud sdk
+
+commit 4082186164c46157a19d7eb3001026d586ba3cb5
+Author: Dinesh Israni
+Date:   Sat Apr 13 20:28:23 2019 -0700
+
+    Switch gosimple to staticcheck and fix errors
+
+    Also added static analysis checks for unit and integration tests
+    Added gocyclo target in Makefile but not enabled until we fix the issues
+
+    Issue #287
+
+commit 4aab57ef7aa302767244bac7cbb3254ba7300229
+Author: Dinesh Israni
+Date:   Fri Apr 12 19:22:30 2019 -0700
+
+    Add events to pod from extender in case of errors
+
+commit 8ed66053a3020211573ca4466a880c26b1bec257
+Author: Dinesh Israni
+Date:   Mon Apr 15 21:34:37 2019 -0700
+
+    Bump version to 2.3.0
+
+commit 10a46387bce2dd3176780a339154ca9c9e58dfc6
+Author: Dinesh Israni
+Date:   Mon Apr 15 21:34:21 2019 -0700
+
+    Revert "Update travis for 2.2 branch"
+
+    This reverts commit 392c882377a6c0efbe3fe3e56f9800460ad8f236.
+
+commit 392c882377a6c0efbe3fe3e56f9800460ad8f236
+Author: Dinesh Israni
+Date:   Wed Jan 30 19:23:58 2019 -0800
+
+    Update travis for 2.2 branch
+
+commit 5a0eac79d2bfeb1207a905b8690d66157b9d0d8f
+Author: Aditya Dani
+Date:   Sun Apr 14 17:59:34 2019 -0700
+
+    vendor updates from sched-ops
+
+commit 6547b18b705e92d0e6e93d0bcffe5a3a55e87954
+Author: Aditya Dani
+Date:   Sun Apr 14 17:58:53 2019 -0700
+
+    New specs for cluster domain integration tests
+
+commit 2ec3df6edf65b60f5468128ee54d53d7d929510b
+Author: Aditya Dani
+Date:   Sun Apr 14 17:59:09 2019 -0700
+
+    Add integration tests for ClusterDomains.
+
+commit 89ed743b9df156b55af9e587e7a6cb000bffc4dc
+Author: Dinesh Israni
+Date:   Sat Apr 13 09:42:30 2019 -0700
+
+    Validate PVC before checking for auto created schedule
+
+commit 31d35f9ca641d398f6be88e91e98f611232bebe5
+Author: Aditya Dani
+Date:   Fri Apr 12 18:46:38 2019 -0700
+
+    Normalize the clusterID before using it as a kubernetes resource name for ClusterDomainsStatus
+
+commit 5d9037d078ebb26eb9bf9f76043637a342760138
+Author: Dinesh Israni
+Date:   Fri Apr 12 13:06:23 2019 -0700
+
+    Add integration test for snapshotschedule created using storageclass
+
+commit 24c788d5492bb29cdb6c7cb62ede3bc2913b0cdc
+Author: Dinesh Israni
+Date:   Fri Apr 12 16:51:25 2019 -0700
+
+    Return nil when clusterpair is deleted without storage options
+
+commit f7d64f815e51190a576984a02952b3b9c7deecee
+Author: Dinesh Israni
+Date:   Fri Apr 12 16:08:13 2019 -0700
+
+    [Portworx] Return error if starting migration fails
+
+    Previously the status was being set as failed. Returning error
+    ensures that the migration will be retried and events will be raised
+
+commit b357a1f4c8f21107929cb763b2d74ccda8197a54
+Author: Dinesh Israni
+Date:   Fri Apr 12 12:33:22 2019 -0700
+
+    Fix name in permission for cluster domain status object
+
+commit 63a164d98b11b59b51c73e697acdd26f44db8309
+Author: Dinesh Israni
+Date:   Wed Apr 10 20:18:26 2019 -0700
+
+    Set default reclaim policy for snapshotschedule
+
+    Also use time from schedule package for creation time to help with integration
+    test
+
+commit de8bfc15b077ed011072c0bd0b19862c1ecf2e5a
+Author: Dinesh Israni
+Date:   Wed Apr 10 20:17:57 2019 -0700
+
+    Vendor update for sched-ops
+
+commit 80c792294d4602ebe236ba024f753a40098fc1c5
+Author: Dinesh Israni
+Date:   Mon Apr 8 12:33:34 2019 -0700
+
+    Integration tests for snapshot schedules
+
+commit 0b5389ad65e1f0fc7c511380838f8ae6061348b9
+Author: Grant Griffiths
+Date:   Wed Apr 10 17:00:17 2019 -0700
+
+    [Portworx] Add IATSubtract option to auth token options
+
+    Signed-off-by: Grant Griffiths
+
+commit e18b842d07343fe74bdb64b41775c7d62458b630
+Author: Grant Griffiths
+Date:   Wed Apr 10 16:59:34 2019 -0700
+
+    Openstorage vendor update
+
+    Signed-off-by: Grant Griffiths
+
+commit 5b81697e06891e184c87828052175a51b1d636a0
+Author: Dinesh Israni
+Date:   Wed Apr 10 09:30:36 2019 -0700
+
+    Add permissions for clusterdomain CRDs
+
+commit 61ac6b416c81dbaaabac1635c8b19fb0c77baad7
+Author: Dinesh Israni
+Date:   Mon Apr 8 19:10:35 2019 -0700
+
+    [Portworx] Take full cloudsnap for weekly and monthly schedules
+
+commit 1681683289f2ab5705f4c71012fa3388ca92a210
+Author: Dinesh Israni
+Date:   Mon Apr 8 18:38:59 2019 -0700
+
+    [Portworx] Fix cloudsnap status check
+
+    If status is present in VolumeSnapshotData return that,
+    else use the taskID to query the status instead of the volumeID
+
+commit c55607d93f483087c3ea4fca8e1aeadcf3ef9581
+Author: Dinesh Israni
+Date:   Mon Apr 8 18:38:21 2019 -0700
+
+    [Portworx] Add owner information to cloudsnaps
+
+commit eb8fd1b2a6981a0c95dd078fce6629c101f03c61
+Author: Dinesh Israni
+Date:   Mon Apr 8 18:36:38 2019 -0700
+
+    Add labels to scheduled snapshot describing the schedule
+
+commit 4eed9a6c422ee46da39b17f3dbf05f2b47fa9413
+Author: Dinesh Israni
+Date:   Mon Apr 8 18:35:54 2019 -0700
+
+    Vendor update for snapshot from external-storage
+
+commit 097d4b1f62335b96e27cbfa9957c40b2d55c9601
+Author: Aditya Dani
+Date:   Tue Apr 9 17:30:28 2019 -0700
+
+    Ignore already exists error when creating clusterdomainsstatus CRD
+
+commit ffeddfbfaafa8baaa0d6dc415742d708953af80d
+Author: Aditya Dani
+Date:   Tue Apr 9 17:18:26 2019 -0700
+
+    Portworx Driver: Use cluster.Enumerate instead of cluster.Uuid.
+
+commit e3f339e07195dc706cb403b870ca8b341efe7684
+Author: Aditya Dani
+Date:   Mon Apr 8 13:34:54 2019 -0700
+
+    Portworx: Return an error when clusterID is empty
+
+    Signed-off-by: Aditya Dani
+
+commit ea22ac2257f05a98eb549e75c275537ca63599b1
+Author: Dinesh Israni
+Date:   Wed Apr 3 22:29:15 2019 -0700
+
+    [Portworx] Only check version for online nodes
+
+commit 984d15b4c35019620355708345af68f39f93f163
+Author: Dinesh Israni
+Date:   Wed Apr 3 22:28:43 2019 -0700
+
+    [Portworx] Add parsing for cluster pair mode
+
+commit cd421d22267464757a07362ca4c71c0ae289dabd
+Author: Dinesh Israni
+Date:   Wed Apr 3 22:28:16 2019 -0700
+
+    Vendor update for openstorage
+
+commit a33ab6d5f921011627b31e8d2ffee09adeb15320
+Author: Dinesh Israni
+Date:   Wed Mar 27 23:14:13 2019 -0700
+
+    [Portworx] Mark canceled migrations as Failed
+
+commit 0c9f5e45383072e88d09950e7f23dffd5e60a683
+Author: Luis Pabón
+Date:   Sun Mar 31 19:54:01 2019 -0700
+
+    Vet/Lint issues
+
+commit 11c7312c5cf55772d1ac41fca3e684587f913fae
+Author: Luis Pabón
+Date:   Sun Mar 31 19:34:14 2019 -0700
+
+    ClusterDomain supports auth
+
+commit 19f5865bebd0745081dc6689e43a5a3ed3eed00d
+Author: Luis Pabón
+Date:   Sun Mar 31 16:23:45 2019 -0700
+
+    TLS in Grpc
+
+commit 43cd3d7540e47af54fb1b14d179583744076079b
+Author: Luis Pabón
+Date:   Thu Mar 28 22:32:29 2019 -0700
+
+    Support for OpenStorage Auth
+
+    Based from work on #302 by Paul Theunis
+
+    Signed-off-by: Luis Pabón
+
+commit 8a9f655bc752c43e0bbf79363859eab6cb746cc7
+Author: Luis Pabón
+Date:   Thu Mar 28 22:32:08 2019 -0700
+
+    Vendor updates
+
+commit 218aba57bed9e70fedc706ff3be05a3ded5cdb07
+Author: Aditya Dani
+Date:   Sat Mar 30 10:32:07 2019 -0700
+
+    Add constraint on torpedo to branch stork-2.2
+
+commit ef0b429d6352bb8081ac566697bfb356f4151bb0
+Author: Rohit-PX
+Date:   Thu Mar 28 15:09:56 2019 -0700
+
+    Integration test - restore from local group snapshots.
+
+    Signed-off-by: Rohit-PX
+
+commit 6775ef28c42bc64965a0de278c51bb79de3b850e
+Author: Aditya Dani
+Date:   Fri Mar 29 17:06:43 2019 -0700
+
+    Added controllers for ClusterDomainsStatus and ClusterDomainUpdate CRD
+
+    Signed-off-by: Aditya Dani
+
+commit 622fb0d4aaf4158a466281cdd3917c43ab9880e0
+Author: Aditya Dani
+Date:   Fri Mar 29 17:06:00 2019 -0700
+
+    storkctl changes for cluster domains
+
+    Added the following commands for storkctl to manage clusterdomains
+
+    - storkctl get clusterdomainsstatus
+    - storkctl get clusterdomainupdate
+    - storkctl activate clusterdomain
+    - storkctl activate clusterdomain --all
+    - storkctl deactivate clusterdomain
+
+    Added UTs for storkctl
+
+commit 9e6c0f84f16032679df8cf55b56a49f8d07f0d31
+Author: Aditya Dani
+Date:   Fri Mar 29 17:04:47 2019 -0700
+
+    Add Driver APIs for ClusterDomain changes
+
+    Added the following new Driver APIs and implemented them for Portworx
+    - GetClusterID
+    - GetClusterDomains
+    - ActivateClusterDomain
+    - DeactivateClusterDomain
+
+    Signed-off-by: Aditya Dani
+
+commit 5f2c707bad9303d2e7956d856f6afddd1dec8705
+Author: Aditya Dani
+Date:   Fri Mar 29 17:03:10 2019 -0700
+
+    Update stork's vendor for ClusterDomain changes
+
+    - openstorage -> release-6.0
+    - gossip
+    - sched-ops
+
+    Signed-off-by: Aditya Dani
+
+commit f21dae3a1eddf214ef1e9a04fee2af7bd8c33f40
+Author: Harsh Desai
+Date:   Sat Mar 23 18:46:01 2019 -0700
+
+    storkctl support for group snapshots
+
+    Fixes #226
+
+    Signed-off-by: Harsh Desai
+
+commit e1c479e7ff2ff9d8dc3adccb40a6f532bc64c86f
+Author: Aditya Dani
+Date:   Fri Mar 29 13:14:34 2019 -0700
+
+    Make ClusterDomain CRDs cluster scoped.
+
+commit 3df4dfb71ff5692e621d4caf6deb948373844945
+Author: Dinesh Israni
+Date:   Wed Mar 27 20:07:19 2019 -0700
+
+    Store finish timestamp for migration
+
+    Also display elapsed time using storkctl
+
+commit f814202e9092bd289ecf72c072f87a0cdecbdb7b
+Author: Dinesh Israni
+Date:   Thu Mar 28 00:03:16 2019 -0700
+
+    Add pod watch permission required for health monitor
+
+commit ddecad8f1d7585ee555e47e8107b7c8242497933
+Author: Harsh Desai
+Date:   Mon Mar 25 14:06:52 2019 -0700
+
+    sched-ops vendor
+
+    Signed-off-by: Harsh Desai
+
+commit 1b8da1a3993a30ac3a1e6b1235d5e7b14fbaae18
+Author: Harsh Desai
+Date:   Mon Mar 25 14:06:42 2019 -0700
+
+    Add health monitor for unknown pods
+
+    Signed-off-by: Harsh Desai
+
+commit 356d61ce53a55a3ccb28aafce9a42c60e93e4846
+Author: Dinesh Israni
+Date:   Tue Mar 26 21:18:33 2019 -0700
+
+    Fix bumping time in migration schedule test
+
+    Adding 31 days for the monthly schedule test can cause the schedule to be
+    skipped for months with 30 days since the date of the month will be one ahead
+
+commit d66c40c2e2eb0d1e19cea741f215fc52630cd706
+Author: Dinesh Israni
+Date:   Tue Mar 26 18:04:24 2019 -0700
+
+    Add support to migrate additional resources
+
+    Following resources will also be migrated now:
+    - DaemonSets
+    - ServiceAccounts (except default)
+    - ClusterRoles (if used in the namespace)
+    - ClusterRoleBindings (if used in the namespace)
+
+commit 9622679af806e7a74150b786ebd72717dac5367b
+Author: Dinesh Israni
+Date:   Tue Mar 26 18:04:02 2019 -0700
+
+    Vendor update for sched-ops
+
+commit 946e005209ae0183ea303d71069d121b4c899813
+Author: Dinesh Israni
+Date:   Mon Mar 25 23:34:12 2019 -0700
+
+    Fixes for migration schedule tests
+
+    - Sleep before checking for error message for invalid schedule. There could be
+      an error because the policy was not found since they are applied in a different
+      order. The next reconciliation will be after a minute
+    - Rollover to the next month/year when finding the right day
+
+commit af7c178bb47e4e0582daa83932c196fd27cbd16f
+Author: Dinesh Israni
+Date:   Mon Mar 25 15:32:30 2019 -0700
+
+    Add annotation in PVC after creating snapshot schedule through SC
+
+    This prevents the schedules from getting re-created if a user manually deletes
+    them
+
+commit ed8ce5f20b2f77f4b0c1c34ebd4a1a53b0f25c87
+Author: Dinesh Israni
+Date:   Fri Mar 22 20:08:01 2019 -0700
+
+    Add validation for interval policy
+
+commit 1cf300da36884670a4cb7d9699cc4344a966a888
+Author: Dinesh Israni
+Date:   Thu Mar 21 22:59:25 2019 -0700
+
+    PVCWatcher: Ignore error if storageclass is not found during update
+
+commit 6d1c5bcc551af0ad1647e0c001c1800f738ac76c
+Author: Dinesh Israni
+Date:   Thu Mar 21 22:52:16 2019 -0700
+
+    Add missing permissions for volumesnapshot schedules
+
+commit 7bcf3b527ed4e102b785fa25b2993cb4e0c98e08
+Author: Aditya Dani
+Date:   Fri Mar 22 12:08:42 2019 -0700
+
+    Allow configuring portworx service in stork
+
+    - Get the service name and namespace from env variables.
+    - Find the ports which PX uses from kubernetes service.
+
+    Signed-off-by: Aditya Dani
+
+commit 04ed0cea2414e3e6ad56ce0dfb19a60d2dbfb000
+Author: Rohit-PX
+Date:   Thu Mar 21 18:23:11 2019 -0700
+
+    Add node start and create separate storageclass for pvcownership test
+
+    Signed-off-by: Rohit-PX
+
+commit da61b7e36da05cbf7e64fd2de6633b999f351587
+Author: Dinesh Israni
+Date:   Thu Mar 21 00:21:33 2019 -0700
+
+    Start snapshot controller first
+
+commit dfafbe4b7f63f500fc2231a21f67c4a832e6c821
+Author: Dinesh Israni
+Date:   Wed Mar 20 23:59:52 2019 -0700
+
+    Update vendor for torpedo
+
+commit b48d80f423f817d0bd7fd7fb98eb8ae0306aa956
+Author: Rohit-PX
+Date:   Tue Mar 19 12:00:19 2019 -0700
+
+    New test to verify pvc ownership fix.
+
+    Signed-off-by: Rohit-PX
+
+commit e75eb59e8b775f3b3e4c33e9797037b07b05d072
+Author: Dinesh Israni
+Date:   Wed Mar 20 12:58:55 2019 -0700
+
+    storkctl: Add all-namespaces param for subcommands
+
+    migrationschedule and snapshotschedule were missing the param
+
+commit c08231487e32d8c8f78c7ab233e431c264d735ed
+Author: Aditya Dani
+Date:   Wed Mar 20 15:22:12 2019 -0700
+
+    Generated code for ClusterDomains CRD
+
+commit 6df7f550e09a7d954ed818609b3b42ff1d2ddbf7
+Author: Aditya Dani
+Date:   Wed Mar 20 15:20:14 2019 -0700
+
+    Add CRDs for cluster domains
+
+    - ClusterDomainsStatus
+    - ClusterDomainUpdate
+
+commit b79b7265ff5293dd2ad26018340b0ed5a622115e
+Author: Harsh Desai
+Date:   Fri Feb 22 18:43:45 2019 -0800
+
+    sched-ops and torpedo changes
+
+    Signed-off-by: Harsh Desai
+
+commit c7a30a22f354fac788efcfd9e0539e003e637d82
+Author: Harsh Desai
+Date:   Thu Feb 21 17:49:02 2019 -0800
+
+    Integration tests for migration schedules
+
+    Signed-off-by: Harsh Desai
+
+commit 4543fbfff2420a643820135965b4b0c843be5b9b
+Author: Dinesh Israni
+Date:   Wed Mar 13 14:18:45 2019 -0700
+
+    Add controller to watch for changes on PVCs
+
+    Right now it creates snapshot schedules if specified in the
+    storageclass for a PVC
+
+commit 47c3eb8229d379c23ae11a825466d039b0ea07a3
+Author: Dinesh Israni
+Date:   Wed Mar 13 14:17:30 2019 -0700
+
+    Add storkctl subcommands for snapshotschedule
+
+commit 3dfe119e2f26017c3ad3053b868f03360cbee54b
+Author: Dinesh Israni
+Date:   Wed Mar 13 14:15:16 2019 -0700
+
+    Add controller for snapshot schedule
+
+    * Similar to migration schedule, uses retain to decide how many snapshots to
+      keep
+    * Enable parameter can be used to enable/disable a schedule
+    * ReclaimPolicy determines what happens to the snapshots triggered by the
+      schedule when the schedule is deleted. Setting to Delete will automatically
+      delete the snapshots when the corresponding PVC is deleted. Setting to Retain
+      (default) will not delete the snapshots.
+    * Moved the snapshot controllers under one path
+
+    Issue #72
+
+commit 5bdd1ec5fe7f6236eb0e4fd1ef5e2dfb46a5a15b
+Author: Dinesh Israni
+Date:   Wed Mar 13 14:11:36 2019 -0700
+
+    Vendor update for sched-ops
+
+commit 0d77e6352a29efa2bd43b77b34258a85d4d13b17
+Author: Dinesh Israni
+Date:   Wed Mar 13 14:11:08 2019 -0700
+
+    Add CRD for volume snapshot schedules
+
+    Issue #72
+
+commit 40ca113d184edf8f44f8b3fcfd900dee8e54caa4
+Author: Dinesh Israni
+Date:   Tue Mar 19 16:52:33 2019 -0700
+
+    Add suspend flag to migrationschedule spec
+
+    It is disabled by default
+    Also update storkctl for the flag
+
+commit f521df476cdb00a3107647102201465ec6ab8ddd
+Author: Dinesh Israni
+Date:   Tue Mar 19 16:08:52 2019 -0700
+
+    Extender: Return error if no replicas are online for a volume
+
+commit 55fc1bfff71c7bd3705593c34a20743dc21c7a56
+Author: Dinesh Israni
+Date:   Thu Mar 14 17:47:10 2019 -0700
+
+    [Portworx] Don't rely on storage class to determine ownership of PVC
+
+    For statically created PVCs there might not be a storage class or annotations
+    with the provisioner. Looks at the volume source in that case to figure out
+    the owner
+
+commit 9a577f63072d4e7c374adc82728181649bd9e832
+Author: Dinesh Israni
+Date:   Wed Feb 27 23:53:47 2019 -0800
+
+    Add integration tests for migrating with label selectors
+
+    Issue #271
+
+commit c24cad3c69fd155763e4d9a18105b357bd71425f
+Author: Dinesh Israni
+Date:   Wed Feb 27 18:36:17 2019 -0800
+
+    Add support to use labels to select resources during migration
+
+    Issue #271
+
+commit 2cd05389885a9fcdf8dabc480689eebff4ef086c
+Author: Dinesh Israni
+Date:   Tue Mar 12 14:53:43 2019 -0700
+
+    Update bool in migration specs to pointers so that we can set defaults
+
+commit 481eeb5564638fa4e62098c9bc75e2d47c23d2ab
+Author: Dinesh Israni
+Date:   Mon Feb 11 15:47:40 2019 -0800
+
+    Added UTs for activate/deactivate subcommands
+
+commit 6853c3582c43212fb038f7003cb229482c27478d
+Author: Dinesh Israni
+Date:   Mon Feb 11 13:41:14 2019 -0800
+
+    Portworx: Return basic volume info in GetPodVolumes() even if Inspect fails
+
+commit f701e8e55be7cf4ad4c0ff945f1f619057533af7
+Author: Dinesh Israni
+Date:   Mon Feb 11 13:39:35 2019 -0800
+
+    Add subcommands to activate and deactivate migrated applications
+
+    The commands look at the deployments and statefulsets in the given namespaces
+    and update the replica count if they have an annotation specifying that the
+    application was migrated using stork
+
+commit a21a6f2af26064b5d39e5a7bd6a46f40b13bfc5c
+Author: Dinesh Israni
+Date:   Mon Feb 4 14:52:04 2019 -0800
+
+    Add option to skip pairing of storage in ClusterPair
+
+    Also added option in migration spec to skip migrating volumes
+    This is helpful in cases where the same storage is available from multiple
+    Kubernetes clusters
+
+commit cb95e0e650891e2b683896df8951995ec4d755a3
+Author: Dinesh Israni
+Date:   Thu Feb 28 16:49:43 2019 -0800
+
+    Recreate service during migration for conflicting node port
+
+commit 5bb9a6892dfd4ba3165b00c5005492e9013bf89f
+Author: Dinesh Israni
+Date:   Thu Feb 28 16:49:25 2019 -0800
+
+    Update vendor dependencies
+
+commit 940f663d48880cfc6c0bfa28564bec9e19247921
+Author: Dinesh Israni
+Date:   Tue Feb 12 17:38:34 2019 -0800
+
+    Added subcommands to storkctl for SchedulePolicy and MigrationSchedule
+
+    - Added create, get and delete for MigrationSchedule
+    - Added get for SchedulePolicy
+    - Added UTs for both resources
+
+commit 248c6992c5bb116e961ce5b16e192cdf4faa9455
+Author: Dinesh Israni
+Date:   Tue Feb 12 17:35:00 2019 -0800
+
+    Add a controller for migration schedules
+
+    - The MigrationSchedule object takes the same parameters as Migration Spec
+    - It also takes in a SchedulePolicyName
+    - The reconciler checks every minute if any new migrations need to be triggered
+    - The status for each migration is stored in the object. Only one successful
+      status is stored
+    - Only one migration can be triggered at a time
+    - The schedule package is used to check if a migration should be triggered for
+      each type of policy.
+    - Includes UTs for the schedule package to make sure triggers will fire
+      correctly
+
+commit e24ce0105d93c9d9b3baa1790f45a81c0b00ba55
+Author: Dinesh Israni
+Date:   Wed Feb 13 16:51:20 2019 -0800
+
+    Update MigrationSchedule CRD to add Migration under Template.Spec
+
+    This is similar to how deployments, statefulsets, etc have pod spec defined
+
+commit d0f8a22a1e4070ba70c132e6e4535d1a2152d19b
+Author: Harsh Desai
+Date:   Tue Feb 19 17:26:52 2019 -0800
+
+    Add support for configurable retry count
+
+    Signed-off-by: Harsh Desai
+
+commit b6873b3f8542edf76699a067e9f3bbd66f7582e2
+Author: Harsh Desai
+Date:   Wed Feb 20 12:02:47 2019 -0800
+
+    add pull request template
+
+    Signed-off-by: Harsh Desai
+
+commit 9efcc832d41163c46a09d57fcef03f4740e017a8
+Author: Harsh Desai
+Date:   Mon Feb 18 09:45:54 2019 -0800
+
+    Allow users to update restore namespaces
+
+    Signed-off-by: Harsh Desai
+
+commit f2bb4f6a1f57700dee981510e35a36af6941a766
+Author: Joost Buskermolen
+Date:   Thu Feb 21 11:04:32 2019 +0100
+
+    Fixed a typo
+
+commit a21adfd82e8a01dba4a5ba2a96921de4bbe293b0
+Author: Harsh Desai
+Date:   Mon Feb 18 14:58:47 2019 -0800
+
+    Don't run fio load specs for scale test
+
+    Signed-off-by: Harsh Desai
+
+commit 9386f9a13e8a77c3a27c233132b54f77d6c53213
+Author: Harsh Desai
+Date:   Sat Feb 16 16:02:48 2019 -0800
+
+    update sched-ops vendor to PR branch
+
+    Signed-off-by: Harsh Desai
+
+commit 01638e4524a756e7fce0d365e044463305b9e824
+Author: Harsh Desai
+Date:   Sat Feb 16 15:17:00 2019 -0800
+
+    Add test to load volumes while group cloudsnap is being done
+
+    Signed-off-by: Harsh Desai
+
+commit 00e2c0d0f8dfff8f471fc56770c8108db57a88e3
+Author: Harsh Desai
+Date:   Fri Feb 15 08:12:30 2019 -0800
+
+    For groupsnap scale test, first create all and then verify
+
+    Signed-off-by: Harsh Desai
+
+commit d4c34c6e2ec37143307e4b53acb7db13e8c5e4fa
+Author: Harsh Desai
+Date:   Fri Feb 15 11:47:13 2019 -0800
+
+    Use version pkg to compare resource versions
+
+    Signed-off-by: Harsh Desai
+
+commit 2338d1d42bf531859a85c3c73b3c17d68c4ae5c1
+Author: Harsh Desai
+Date:   Fri Feb 15 09:02:51 2019 -0800
+
+    Return status for failed cloudsnapshots
+
+    - If any cloudsnap has failed, the get status API should not fail.
+      Rather it should return the failed tasks in the response so controller can
+      log an event and then reset and retry the group snapshot
+
+    Signed-off-by: Harsh Desai
+
+commit 2747b0908d60d4fa8c07992c74e36a4bce7deac5
+Author: Dinesh Israni
+Date:   Thu Feb 14 21:53:06 2019 -0800
+
+    Don't override pod variable when running command in the pod
+
+commit 6b36e9a4558b68e5f07ebfad017d7aa55d48db0b
+Author: Dinesh Israni
+Date:   Thu Feb 14 17:30:12 2019 -0800
+
+    Vendor update for sched-ops
+
+commit 25e3f7ae33e60befbdf98af45fe1c91872ea676a
+Author: Dinesh Israni
+Date:   Wed Feb 13 18:58:09 2019 -0800
+
+    Add gofmt to Makefile and fix errors that were found
+
+commit fcee7aeeff2e21cdb1463e31e11019a6a8c90f47
+Author: Dinesh Israni
+Date:   Wed Feb 13 15:10:31 2019 -0800
+
+    Add --all-namespaces parameter to storkctl for get subcommands
+
+    Also prints the namespace if the parameter is specified similar to kubectl
+
+commit 0e95b0854fd79a33609268acce27cc407bcf1eff
+Author: Dinesh Israni
+Date:   Wed Feb 13 15:09:59 2019 -0800
+
+    Govendor update for sched-ops
+
+commit ecb1590558e592f87581c56edf484e33eda54940
+Author: Harsh Desai
+Date:   Wed Feb 13 11:51:14 2019 -0800
+
+    propagate group snap annotations to child volumesnapshots
+
+    Signed-off-by: Harsh Desai
+
+commit f1bbe522f8fa020a7dc21900cfc267b62702277e
+Author: Dinesh Israni
+Date:   Wed Feb 13 15:58:13 2019 -0800
+
+    Copy storkctl for all architectures into the container
+
+commit 27f86f64a9369d1faec44cf494b1eac048ab199a
+Author: Dinesh Israni
+Date:   Tue Feb 12 15:17:37 2019 -0800
+
+    Add CRDs for SchedulePolicy and MigrationSchedule
+
+commit f2d62e6ad10cafb1d0b91c27994259048ecd5917
+Author: Dinesh Israni
+Date:   Tue Feb 12 15:16:52 2019 -0800
+
+    Split up CRDs into different files
+
+commit 90214bd5013f7f435d969ada68d6d33e67d5418c
+Author: Harsh Desai
+Date:   Tue Feb 12 14:58:55 2019 -0800
+
+    Fix ready and pending snapshot conditions
+
+    Signed-off-by: Harsh Desai
+
+commit b358d67e16cf592ae2d212e462659f72dc1e6352
+Author: Dinesh Israni
+Date:   Mon Feb 4 14:53:58 2019 -0800
+
+    Also set the namespace annotations from source cluster during migration
+
+commit d4fa569978ad4b5da75ef29dc1cf6459bf1b215f
+Author: Dinesh Israni
+Date:   Fri Feb 1 15:07:21 2019 -0800
+
+    Portworx: Set snapshot type for group localsnap
+
+    Also assume local snap if type isn't set
+
+commit 5196a74117e40b1fee20f922764f74bbae5efbea
+Author: Dinesh Israni
+Date:   Wed Jan 30 19:51:16 2019 -0800
+
+    Reset kubeconfig in case of failure in migration test
+
+commit a3a4eaed6a8a983efcec4ac348292225366247f6
+Author: Dinesh Israni
+Date:   Wed Jan 30 19:19:15 2019 -0800
+
+    Update version to 2.2.0
+
+commit fc42c75aeeb608368a2e84dc297a9bbc53e7f962
+Author: Dinesh Israni
+Date:   Wed Jan 30 19:03:24 2019 -0800
+
+    Fix storage class in cassandra integration test
+
+commit e0c32e2ffb6774c360bde4c86abba66171a8bca7
+Author: Piyush Nimbalkar
+Date:   Thu Dec 20 15:48:23 2018 -0800
+
+    Add a StorageCluster CRD to manage cluster
+
+    - Added a controller which does nothing as of now except for watching over StorageCluster object
+    - The controller starts by default with stork, can be disabled using params
+    - Placement spec and node status in storage cluster CRD
+    - Using pointers for some fields so we can distinguish between an empty
+      value and default value
+    - Disabling cluster controller by default
+
+commit 16554cc4bb96be5f7076f5aa381ef40948b31de7
+Author: Dinesh Israni
+Date:   Fri Jan 25 13:38:34 2019 -0800
+
+    Add options to storkctl for migration pre/post exec rule
+
+commit ad6c521b2c3b4ddd6d9a2b2ab9bc3245d5a15755
+Author: Dinesh Israni
+Date:   Tue Jan 22 19:21:08 2019 -0800
+
+    Add integration tests for migration pre/post exec rules
+
+commit 612c8e77375a7cca8120be5c7b03cc2e327d80e3
+Author: Dinesh Israni
+Date:   Fri Jan 18 21:00:50 2019 -0800
+
+    Add Pre/Post Exec rules for migration
+
+commit 183a546a48bfd6df2a903afb90bbff60f29188ac
+Author: Dinesh Israni
+Date:   Tue Jan 22 19:18:31 2019 -0800
+
+    Vendor update
+
+commit ee251e39c92022714516c76f3bf296fd5fa83e3b
+Author: Dinesh Israni
+Date:   Tue Jan 29 18:51:51 2019 -0800
+
+    Update migration spec in integration test to point to correct namespace
+
+commit 9f690e7c9f9a4d980924fbeac9ad0a577bc5ea51
+Author: Dinesh Israni
+Date:   Tue Jan 29 17:14:12 2019 -0800
+
+    Portworx: Use volumeName for cloudsnaps during inspect after restore
+
+commit 997b84355986b7d52f9658550dda4ede2fd11ed8
+Author: Dinesh Israni
+Date:   Tue Jan 29 12:59:40 2019 -0800
+
+    Updated vendor for torpedo and sched-ops
+
+commit 1be1eb98499bd52c4bcc97e4b7520d32d50e6786
+Author: Harsh Desai
+Date:   Thu Jan 17 15:59:30 2019 -0800
+
+    Add integration tests for group local and cloud snaps
+
+    Signed-off-by: Harsh Desai
+
+commit a9102584511b7742fa582b3e391a5387d89709e4
+Author: Dinesh Israni
+Date:   Tue Jan 29 14:02:51 2019 -0800
+
+    Portworx: Use volumeID instead of volume name after creating snapshot
+
+commit 4042abb54093f253b427b94987732a760796385c
+Author: Dinesh Israni
+Date:   Tue Jan 15 18:11:16 2019 -0800
+
+    Don't delete clusterIP when migrating headless service
+
+commit 99de5e62d80bd530993ea99ee14305e23ef5f0cd
+Author: Dinesh Israni
+Date:   Tue Jan 15 18:09:48 2019 -0800
+
+    Rename groupvolumesnapshot rule fields to PreExec/PostExec
+
+    Same generic names will be used in migration to be consistent
+
+commit 3ede620bf41c6539417fb40487e115c87831ce4b
+Author: Dinesh Israni
+Date:   Fri Jan 18 16:03:11 2019 -0800
+
+    Create directory for clusterpair spec in integration test
+
+commit 630d1f6b86b9bdf8fc10e01293bc82688b658efd
+Author: Dinesh Israni
+Date:   Mon Jan 14 13:39:34 2019 -0800
+
+    Update Rule CRD
+
+    Renamed spec to rules. Spec should only be used for objects that
+    have to be reconciled (ie have a status)
+
+    Also updated the examples.
+
+commit a34edf5799cfa146e72f62a8a18d3b0a0019eaac
+Author: Harsh Desai
+Date:   Fri Jan 18 12:17:15 2019 -0800
+
+    check for nil annotations on rule
+
+    Signed-off-by: Harsh Desai
+
+commit ea08905b606590a62476855c0e76ab37358f4db2
+Author: Harsh Desai
+Date:   Thu Jan 17 09:34:11 2019 -0800
+
+    minimum resource version tracking needs to be per group snapshot
+
+    Signed-off-by: Harsh Desai
+
+commit 17dc4dee9f0945c0f4f3f55832c7859ab88ac7d9
+Author: Harsh Desai
+Date:   Wed Jan 16 15:45:01 2019 -0800
+
+    review comments
+
+    - don't use group snapshot from caller on errors
+    - raise event for pod not found
+    - use map for duplicates
+
+    Signed-off-by: Harsh Desai
+
+commit c6b1d371c602baf9b01e2aea2c78771521b9cfc2
+Author: Harsh Desai
+Date:   Tue Jan 15 12:22:18 2019 -0800
+
+    vendoring changes
+
+    Signed-off-by: Harsh Desai
+
+commit 35711364ebc52cb8728cc24e92683c9ddeb6fdff
+Author: Harsh Desai
+Date:   Wed Jan 16 13:26:56 2019 -0800
+
+    Fixes for rule recovery
+
+    - Pods running rule commands were not being tracked in snapshot annotations
+    - Rule recovery was not getting invoked for group snapshots
+    - Existing pods in tracker were getting overridden by new pods
+
+    Signed-off-by: Harsh Desai
+
+commit 5253d5052949d96b3bc5712cf6bcd6264cdbc037
+Author: Harsh Desai
+Date:   Wed Jan 16 14:06:50 2019 -0800
+
+    validate pre and post checks in initial stage
+
+    Signed-off-by: Harsh Desai
+
+commit 3ae7cc09050ff59e4bd121dc96a1b83d640f8364
+Author: Dinesh Israni
+Date:   Mon Jan 14 13:53:11 2019 -0800
+
+    Update README.md
+
+commit 2d46472b0799caf7c0c761da0eb1edce9a335fb8
+Author: Harsh Desai
+Date:   Mon Jan 7 10:59:07 2019 -0800
+
+    vendoring changes for group snapshots
+
+    Signed-off-by: Harsh Desai
+
+commit 706dd3e56f6af0425ef1cf57576fef1d6454a352
+Author: Harsh Desai
+Date:   Fri Dec 21 16:15:15 2018 -0800
+
+    Add controller for groupvolumesnapshot
+
+    Signed-off-by: Harsh Desai
+
+    Address review comments
+
+    - process cloudsnap failed tasks
+    - revert local and cloudsnaps as soon as the first failure is observed
+    - allow deletes of legacy group snapshots
+    - group snapshot controller part of snapshot controller
+    - fix cassandra restore pvcs
+    - fix v1 imports
+    - fix duplicate events
+    - revert active cloudsnapshots too
+    - use groupsnap logger
+    - move specs to examples
+    - use explicit variables to track done and active IDs
+
+    Signed-off-by: Harsh Desai
+
+commit d70a15dba2b384e421e7e44c82218dbf3c88291c
+Author: Harsh Desai
+Date:   Thu Dec 20 16:01:41 2018 -0800
+
+    Add group volume snapshot CRD
+
+    Signed-off-by: Harsh Desai
+
+commit af099b96ee61bce5d0eb2ef23dfffdb46700f31f
+Author: Harsh Desai
+Date:   Mon Jan 7 11:38:31 2019 -0800
+
+    fix new gosimple checks
+
+    - pkg/initializer/initializer.go:140:9: assigning the result of this type
+      assertion to a variable (switch obj := obj.(type)) could eliminate the
+      following type assertions:
+
+    Signed-off-by: Harsh Desai
+
+commit 5ae0e1ffdc04af5059cfd80eb6239af3dbf52e50
+Author: Dinesh Israni
+Date:   Tue Dec 18 00:28:48 2018 -0800
+
+    Update alpine packages and add ca-certs during docker build
+
+commit b8bce235e77fca8d34e736879d5f82d64485e992
+Author: Dinesh Israni
+Date:   Mon Dec 17 19:00:27 2018 -0800
+
+    Update master version to 2.1 for next release
+
+commit beb8947e4a00d6b05a870798c0d07537087cf763
+Author: Dinesh Israni
+Date:   Mon Dec 10 07:27:43 2018 -0800
+
+    Fix version regex in Portworx driver
+
+    Issue #216
+
+commit c440015ec7e2fcd39d6f10af83f4e9fe1a5939dd
+Author: Dinesh Israni
+Date:   Mon Dec 3 17:10:47 2018 -0800
+
+    Set theme jekyll-theme-cayman
+
+commit eb6edc454644c13869d95628cae85641cb3783c9
+Author: Dinesh Israni
+Date:   Mon Dec 3 11:59:05 2018 -0800
+
+    [Portworx] Also check for CSI provisioner name for ownership
+
+commit c6eebc91ca8828327fc96228d8a207f36fc89270
+Author: Dinesh Israni
+Date:   Fri Nov 30 15:55:58 2018 -0800
+
+    Bump version to 2.0.0 (#212)
+
+commit 612f0d327810e739476388386e0656ee723bae82
+Author: ram-infrac <35250819+ram-infrac@users.noreply.github.com>
+Date:   Fri Nov 30 22:45:09 2018 +0530
+
+    Create mysql app before scheduling clusterpair (#211)
+
+    Signed-off-by: Ram Suradkar
+
+commit eb687e3c4ccb632f8875be841a3e6f0d46c94ce5
+Author: ram-infrac <35250819+ram-infrac@users.noreply.github.com>
+Date:   Fri Nov 30 12:45:57 2018 +0530
+
+    Add namespace to clusterpair and migration specs (#210)
+
+    * Add namespace to clusterpair spec
+
+    Signed-off-by: Ram Suradkar
+
+    * Make migration and cluster pair in same specs
+
+    Signed-off-by: Ram Suradkar
+
+    * Add Name to clusterpair
+
+    Signed-off-by: Ram Suradkar
+
+commit 1585dc5f02a75c3883b899f06353d635ae2c657f
+Author: Dinesh Israni
+Date:   Thu Nov 29 19:16:34 2018 -0800
+
+    Add name and namespace when generating clusterpair
+
+commit 1233c99cfec41a9741100729e115360291fee6c6
+Author: Dinesh Israni
+Date:   Thu Nov 29 17:50:07 2018 -0800
+
+    Print stork version during startup
+
+commit 9ed43d91b03bfb4aa2e3bfe3c56a78e7225926c1
+Author: Dinesh Israni
+Date:   Wed Nov 28 19:22:38 2018 -0800
+
+    Allow configuring an admin namespace that can migrate all other namespaces
+
+commit 449b5c6117ad9f54e976d1fded6fab3ff8b8ba74
+Author: Dinesh Israni
+Date:   Wed Nov 28 19:14:06 2018 -0800
+
+    Add rule CRD register which was removed in code refactor
+
+commit 3609b8cc62a4e4e598804525bb22237b2920564d
+Author: Dinesh Israni
+Date:   Wed Nov 28 20:17:28 2018 -0800
+
+    Add UTs for all the log APIs
+
+commit beb68d5ac109f245532678978763457617a11403
+Author: Dinesh Israni
+Date:   Wed Nov 28 19:04:29 2018 -0800
+
+    Update storkctl UTs to use base command
+
+commit a0866c5ab2006db8bf2d05f2255890199cdc5654
+Author: Dinesh Israni
+Date:   Wed Nov 28 00:39:40 2018 -0800
+
+    UTs for storkctl migration subcommand
+
+commit 9a57de713aff11c1bc88d58764af575a2b80a001
+Author: Dinesh Israni
+Date:   Tue Nov 27 09:33:42 2018 -0800
+
+    Add some more UTs for storkctl clusterpair commands
+
+commit e2ee65f042ee337b61fddcb14ac2db3e80953682
+Author: Dinesh Israni
+Date:   Tue Nov 27 09:33:26 2018 -0800
+
+    Update dependencies for k8s client-go and sched-ops
+
+commit c191e60c8afb0a2b8ac16f528c773d6a13de1adf
+Author: Dinesh Israni
+Date:   Wed Nov 21 14:03:39 2018 -0800
+
+    [storkctl] Add return after checkErr and use namespace when creating migration
+
+commit a020787024f043f4de0fc91603c93ba766e7371b
+Author: Dinesh Israni
+Date:   Wed Nov 21 14:04:46 2018 -0800
+
+    Add some more UTs for storkctl
+
+commit 91bfcd1f8ae0120faaf86f177ed7bc2ecd5696a6
+Author: Dinesh Israni
+Date:   Tue Nov 20 21:42:34 2018 -0800
+
+    Limit migration to namespace of migration object
+
+commit 9fa83ef7367c96ec9fcf95212fcb36e647a2259b
+Author: Dinesh Israni
+Date:   Tue Nov 20 19:36:49 2018 -0800
+
+    Vendor update for sched-ops and torpedo
+
+commit ced3376a21109a8206189999bb97e98844d72bfa
+Author: Dinesh Israni
+Date:   Tue Nov 20 17:18:18 2018 -0800
+
+    Change clusterpair and migration to be namespaced
+
+commit 118edf7180827650eff50cff6644286bc77d418f
+Author: Dinesh Israni
+Date:   Tue Nov 20 21:56:17 2018 -0800
+
+    [Portworx] Add eta information to migration info
+
+commit 42f16dd296c9f84276230b29f17da2981de80594
+Author: Dinesh Israni
+Date:   Tue Nov 20 21:56:00 2018 -0800
+
+    Vendor update for openstorage
+
+commit 4ad2fd1a21b4128620f0db093c478f4dba2a5667
+Author: Dinesh Israni
+Date:   Tue Nov 27 09:38:11 2018 -0800
+
+    When logging pod info check for owner pointer before dereferencing
+
+    Also added UT for pod log
+
+commit 1f416c77c53d18bb3f0ef678d199adfcd1cfd086
+Author: Dinesh Israni
+Date:   Tue Nov 13 14:04:12 2018 -0800
+
+    Fix incorrect status update after resource migration
+
+commit bf5b9227af48cd65463d196cbc6d1de98bacdbcc
+Author: ram-infrac <35250819+ram-infrac@users.noreply.github.com>
+Date:   Tue Nov 20 14:26:48 2018 +0530
+
+    Add liveness and readiness probe to mysql-1-pvc spec (#200)
+
+    Signed-off-by: Ram Suradkar
+
+commit 78af18cfb5f2d5f737f425c4904b8327d36e2c61
+Author: Dinesh Israni
+Date:   Tue Nov 13 20:31:39 2018 -0800
+
+    [Portworx] Implement cancellation of migration
+
+commit a166c619ec5368d1a11e5dc14ebb97709d03cfad
+Author: ram-infrac <35250819+ram-infrac@users.noreply.github.com>
+Date:   Sun Nov 18 17:26:04 2018 +0530
+
+    Add Basic sanity cloud migration integration tests (#176)
+
+    * Govendor update for torpedo
+
+    Signed-off-by: Ram Suradkar
+
+    * Add Sanity Integration test for Cloud Migration
+
+    - Read configMap and dump cluster kubeconfig info
+    - Get remote cluster Info
+    - create clusterpair spec file
+    - apply migration spec file
+    - wait for application running on remote cluster
+
+    Signed-off-by: Ram Suradkar
+
+    * Add createCRDObjects() under schedule interface
+
+    -- added rescan API for specDir
+
+    Signed-off-by: Ram Suradkar
+
+    * Changed test to accommodate Schedule() call
+
+    - move migrs directory under specs/
+    - remove createCRDObjects() call
+
+    Signed-off-by: Ram Suradkar
+
+    * Change cluster pair constant to match options section
+
+    - remove unnecessary log
+
+    Signed-off-by: Ram Suradkar
+
+    * Remove clusterpair parsing from stork-test
+
+    - use storkctl to generate clusterpair
+    - move cloud_migration to migration test
+    Signed-off-by: Ram Suradkar
+
+    * Vendor update for torpedo
+
+    Signed-off-by: Ram Suradkar
+
+    * Review Changes
+
+    Signed-off-by: Ram Suradkar
+
+    * Vendor updates for Torpedo
+
+    Signed-off-by: Ram Suradkar
+
+    * vendor updates
+
+    Signed-off-by: Ram Suradkar
+
+    * Set k8s_ops to default after storkctl generate
+
+    Signed-off-by: Ram Suradkar
+
+    * Added generating configmap from remote kubeconfig in test-deploy script
+
+    - added review comments
+    Signed-off-by: Ram Suradkar
+
+commit 23f54c23ed52f32654004763171819e594787f4e
+Author: Craig Rodrigues
+Date:   Thu Nov 15 18:07:09 2018 -0800
+
+    Add tests for pvc
+
+commit fdd9c8eaeeed0d260dd5d3bfc41cfd02034a8711
+Author: Craig Rodrigues
+Date:   Tue Nov 13 00:33:57 2018 -0800
+
+    Add tests for cluster pair and migration.
+
+    Rename testSnapshotsCommon to testCommon for use in clusterPair tests
+    Refactor testCommon() for use in ClusterPair tests
+
+    Comment out tests which need more work
+
+commit 3d8747d16e6df13d87a194723e2cc779b72aa44a
+Author: Craig Rodrigues
+Date:   Tue Nov 13 17:33:08 2018 -0700
+
+    Rearrange use of util.CheckErr() to fix unit test in failure scenario
+
+commit 42935e10a86c53c904e47d67576c32eae4761c6c
+Author: Craig Rodrigues
+Date:   Tue Nov 13 17:32:02 2018 -0800
+
+    Check for nil config.Contexts[currentContext]
+
+commit b61f4265e3ead39f98fc8002575bfc4df299825c
+Author: Dinesh Israni
+Date:   Tue Nov 13 14:27:27 2018 -0800
+
+    Update generated code for CRDs
+
+commit 3ebdcfde95b76d914dd5f50854f4ec284161be22
+Author: Dinesh Israni
+Date:   Tue Nov 13 14:26:02 2018 -0800
+
+    Update code-generator dependency to kubernetes-1.11.0
+
+commit 012ef5d832efb11c43ad541150afff8a1511b5e2
+Author: Dinesh Israni
+Date:   Tue Nov 13 13:11:03 2018 -0800
+
+    Remove clusterIP from service before migrating
+
+commit 67bf9e1dc87f1e2a43b3f88ec61049e78e8c810f
+Author: Craig Rodrigues
+Date:   Mon Nov 12 15:58:33 2018 -0800
+
+    Add tests for creating and deleting snapshots
+
+commit 4e184ab082c64aee7daca2bac65ce8e74732d091
+Author: Craig Rodrigues
+Date:   Mon Nov 12 19:07:40 2018 -0800
+
+    Replace fmt.Printf() with printMsg(), to use proper ioStreams.
+
+commit 3ab6bb1b83487d0ca2c26494f81ee57e69475ba9
+Author: Dinesh Israni
+Date:   Fri Nov 9 18:52:03 2018 -0800
+
+    Add unittest for version subcommand
+
+commit 1fa341e1841ed7bbb7ba5a2a7e9285635c7221ea
+Author: Dinesh Israni
+Date:   Fri Nov 9 18:41:20 2018 -0800
+
+    Add version to storkctl
+
+    - Use generated version in stork
+    - Use version-gitSHA
+
+commit ca18866a97ca2f6c168657dc8c36bd489bbc8e09
+Author: Craig Rodrigues
+Date:   Mon Nov 12 16:16:11 2018 -0800
+
+    "Atleast" should be "At least"
+
+commit 58d4546cde6bcd724550274d8642b6235673d497
+Author: Dinesh Israni
+Date:   Fri Nov 9 18:20:25 2018 -0800
+
+    Process all migrations before returning
+
+    Still wait for all migrations to complete before updating status
+
+commit 14b751a545a2446af2046b271796c5ed36ce887d
+Author: Dinesh Israni
+Date:   Fri Nov 9 00:48:10 2018 -0800
+
+    Set Kind in snapshot object before executing rule
+
+    It doesn't seem to be set always when being passed in from the snapshot
+    controller. With that the GetObject() and UpdateObject() APIs can't determine
+    the object's type.
+
+commit d7de0de3c34a3db8ecd0bc69b938898023f1349f
+Author: Dinesh Israni
+Date:   Thu Nov 8 18:29:05 2018 -0800
+
+    Make sure namespaces exist before starting migration
+
+    Also don't check for volume migration status if none are being migrated
+
+commit 59cd317e89dfe11c002e05efe8ef395e02ec7876
+Author: Craig Rodrigues
+Date:   Mon Oct 15 16:21:11 2018 -0700
+
+    Add test for snapshots
+
+    - Add test functions for mock testing kubernetes API server
+      These functions are taken from 'get_test.go' in the kubernetes
+      repository.
+
+commit ad41e1ecc67d27a3419c738afad1864fc1034653
+Author: Craig Rodrigues
+Date:   Fri Nov 9 11:27:05 2018 -0800
+
+    Update dependencies
+
+commit 01d1b4770f98b9c73de21d720c3cbd7e732ddbb9
+Author: Dinesh Israni
+Date:   Thu Nov 8 15:44:33 2018 -0800
+
+    Push storkctl only from master
+
+commit c6b17627f07aa00d431fd916746258275377f9b3
+Author: Dinesh Israni
+Date:   Thu Nov 8 15:05:36 2018 -0800
+
+    Update dependencies
+
+commit 597bc3e30789118afec6b545a25470538af4683d
+Author: Dinesh Israni
+Date:   Thu Nov 8 14:57:13 2018 -0800
+
+    Use kubectl helper to deal with errors
+
+    Error behavior can be overwritten for tests
+
+commit f84eb8ca1a6c4923d10a1b55da6fc5c6cdfd6a4f
+Author: Dinesh Israni
+Date:   Thu Nov 8 14:35:52 2018 -0800
+
+    Add auth providers to storkctl
+
+commit 990a3a54b3e30b9dc32668971c5c87ecb117d547
+Author: Dinesh Israni
+Date:   Thu Nov 8 13:55:50 2018 -0800
+
+    Vendor update for sched-ops
+
+commit 7fcd63af6cce2ff0b034e05297a38319ee743ca5
+Author: Dinesh Israni
+Date:   Wed Nov 7 18:05:32 2018 -0800
+
+    Read files when generating cluster pair and populate inline
+
+commit b3e1f454d84f95ea2c18bebb1c529ed3dc309773
+Author: Dinesh Israni
+Date:   Wed Nov 7 18:28:09 2018 -0800
+
+    Add empty PersistentVolumeRef to VolumeSnapshotData
+
+commit 81b85e586b4a84d0636be585d65d1635515b198e
+Author: Craig Rodrigues
+Date:   Mon Nov 5 18:06:12 2018 -0800
+
+    Add SetOutputFormat() to Factory interface
+
+    This is needed to facilitate tests.
+
+commit 64894c894119f5997829d7ec0faa7f67862e3314
+Author: Dinesh Israni
+Date:   Tue Nov 6 20:29:54 2018 -0800
+
+    Replace gcloud binary path in generated clusterpair
+
+commit a4887eb30babfa5166421bc4ec1851cc5d2f8523
+Author: Dinesh Israni
+Date:   Tue Nov 6 20:15:24 2018 -0800
+
+    Only add info for current context when generating cluster pair
+
+commit 80057754a36499c88bd1929b6ba422c85c97cbbb
+Author: Dinesh Israni
+Date:   Tue Nov 6 20:01:20 2018 -0800
+
+    Add gcloud to the stork container
+
+commit 1bf3f0c005e3293d11eefb075f0074c160ae2d7f
+Author: Dinesh Israni
+Date:   Tue Nov 6 18:53:18 2018 -0800
+
+    Update permissions for get and list of all resources
+
+commit decc97bf178062c75a0e0a7cc34caad15e0dc2ac
+Author: Craig Rodrigues
+Date:   Mon Nov 5 13:37:47 2018 -0800
+
+    Update golang version in travis to 1.11.2
+
+commit e201a7525d7e0f670d3b0eac8d8bf79dd15b61da
+Author: Dinesh Israni
+Date:   Sun Nov 4 00:00:28 2018 -0700
+
+    Add check to resolve hostname when matching nodes for k8s on DC/OS
+
+commit 9d67b9a286a312596e1d2a486d6398cddd247aed
+Author: Dinesh Israni
+Date:   Sat Nov 3 23:37:48 2018 -0700
+
+    Disable leader election for snapshot controller
+
+    Leader election already happens in stork
+
+commit 55351568b6826b68cb6aa9e1fc80d4cbd123cdce
+Author: Dinesh Israni
+Date:   Fri Nov 2 23:42:22 2018 -0700
+
+    Fix extender unittest
+
+    Also exit with error in Makefile if unittest fails
+
+commit 34f7be8cdfadfc64216db6484401151f6826b8bc
+Author: Dinesh Israni
+Date:   Fri Nov 2 22:27:48 2018 -0700
+
+    Don't use namespace for cluster resources with dynamic client
+
+commit 34ae11c2ed127ea6de9ea9cc9fea351d9c5c71e1
+Author: Dinesh Israni
+Date:   Fri Nov 2 22:26:41 2018 -0700
+
+    Update branch to use for openstorage
+
+commit 021861735cb69f256c4cf03891636505faf84eab
+Author: Dinesh Israni
+Date:   Fri Nov 2 16:53:36 2018 -0700
+
+    Removed unused variables
+
+commit ed27912790cacad21df043642e8d963f32c0ab04
+Author: Dinesh Israni
+Date:   Tue Oct 30 16:50:00 2018 -0700
+
+    Update generated code for CRDs
+
+commit ac00582e4ec2d7380ab9cd3a7977965a462868bf
+Author: Dinesh Israni
+Date:   Tue Oct 30 15:35:23 2018 -0700
+
+    Switch from govendor to dep
+
+commit 4332391a181a54410d4e0b85b7625eb962be7111
+Author: Dinesh Israni
+Date:   Fri Oct 12 15:31:17 2018 -0700
+
+    Govendor update
+
+    Update k8s version to 1.11.0
+
+commit f1c7d85e684f9856546621144014e9768e8e8fc4
+Author: Dinesh Israni
+Date:   Tue Oct 9 16:55:08 2018 -0700
+
+    Refactor rule package
+
+    Removed from snapshot package and added a separate package so that it can be
+    used by different modules
+
+    Also fixed annotation to use the correct conventions. The old annotations can
+    still be used but they will be deprecated.
+
+commit 00b78f58e0dce0ad2552b5e47af0008bbfcbca37
+Author: Dinesh Israni
+Date:   Wed Oct 17 17:32:28 2018 -0700
+
+    Update storkctl to pass in streams
+
+    Will be used by unit tests
+
+commit 46b6aeda499628e08ce6a2d9f3287e2041cb4709
+Author: Dinesh Israni
+Date:   Thu Nov 1 21:07:35 2018 -0700
+
+    Disable CGO to allow binaries to run on alpine
+
+commit 3ef4d02ee27be1dc46a1ea6a964a27432748f0af
+Author: Dinesh Israni
+Date:   Thu Nov 1 12:46:54 2018 -0700
+
+    Add aws authenticator binary to the container
+
+    Also use alpine instead of atomic and remove things added for
+    rhel registry
+
+commit faa6b7a05ccf65e5cf694bf3be2bc063ebeec6e4 (tag: v1.3.0-beta1)
+Author: Dinesh Israni
+Date:   Sat Oct 27 00:03:14 2018 -0700
+
+    Update version to 1.3.0-beta
+
+commit d7211031e8c806e6f207fedf917e4b0e342ced85
+Author: Dinesh Israni
+Date:   Fri Oct 26 15:08:15 2018 -0700
+
+    Make the storkctl binaries publicly accessible
+
+commit 7e43717fe0d9b12f2f6b6fd45df846d47ff83fb6
+Author: Dinesh Israni
+Date:   Thu Oct 25 20:20:04 2018 -0700
+
+    Add command to generate clusterpair spec from destination cluster
+
+commit b6a1883c81c0318e46633a81c0403497a88001de
+Author: Dinesh Israni
+Date:   Thu Oct 25 17:17:44 2018 -0700
+
+    [Portworx] Add reason for migration failure
+
+commit befc40f2e55eaab85f4fc2a1b076b35f99e34b37
+Author: Dinesh Israni
+Date:   Thu Oct 25 17:17:22 2018 -0700
+
+    Govendor update for openstorage
+
+commit 723d782af0c7bc5c3d4ac0db29685ecf26b6e806
+Author: Dinesh Israni
+Date:   Thu Oct 25 15:47:13 2018 -0700
+
+    [Portworx] Add namespace to migration task Id
+
+    The pvc name can exist across multiple namespaces
+    Also use task id to match status instead of volumename
+
+commit b88865a129c8fc19e6dbf44d665891b2f8b2cbc4
+Author: Dinesh Israni
+Date:   Tue Oct 23 20:21:53 2018 -0700
+
+    [Portworx] Update cloudsnap and cloudmigrate APIs to use taskIDs for idempotency
+
+commit 0a3eff3b4862689dc186cda0e3fc16f514cb93b0
+Author: Dinesh Israni
+Date:   Tue Oct 23 20:21:39 2018 -0700
+
+    Govendor update for openstorage
+
+commit ebdd417403d13b1b07c51fa2f9f649474877465a
+Author: Dinesh Israni
+Date:   Tue Oct 23 20:41:49 2018 -0700
+
+    Update permissions for migration and cluster pair
+
+    Also remove extra permissions for CRDs and sync permissions in the daemonset
+    spec
+
+    Issue #165
+
+commit 257bf61f348a836d7302bb296e8da1b36ebf8db3
+Author: Dinesh Israni
+Date:   Fri Oct 19 20:59:48 2018 -0700
+
+    Add command to create pvc from snapshot
+
+commit 9f2499d3b4fafc4ec1ec3bcc7faf879d01ad3584
+Author: Dinesh Israni
+Date:   Fri Oct 19 19:16:33 2018 -0700
+
+    Upload storkctl binaries to s3
+
+commit a6dc36b574cc678857abb01be811e930d5e9d28b
+Author: Dinesh Israni
+Date:   Fri Oct 19 18:55:11 2018 -0700
+
+    Add storkctl to container
+
+commit abf1e99a5f902c630bbafdea4059ca1cfe318da2
+Author: Dinesh Israni
+Date:   Tue Oct 9 16:02:29 2018 -0700
+
+    Add timeouts for updated APIs
+
+commit 93ad80bc14f0d83a8f1dce890c9cf27289a2a9a2
+Author: Dinesh Israni
+Date:   Tue Oct 9 16:01:50 2018 -0700
+
+    Govendor update
+
+commit 20b16503157af232844ab87f8059c29eb9467576
+Author: Dinesh Israni
+Date:   Wed Jun 27 14:23:45 2018 -0700
+
+    Add storkctl to manage stork resources
+
+    - Subcommands added are create, get and delete
+    - Supported resources are
+      - volumesnapshots
+      - clusterpair
+      - migration
+    - Supports global parameters for namespace, kubeconfig, context and output
+      format through CmdFactory
+    - Binary is built for linux, darwin and windows
+
+    Issue #80
+
+commit 2a03cba14fb1841cbda76668f1ce4d82be15086e
+Author: Dinesh Israni
+Date:   Thu Sep 27 15:43:04 2018 -0700
+
+    Update migration object to be cluster scoped
+
+commit c199b0a30aac6545f7383b30b1d4d790f385b1a6
+Author: Dinesh Israni
+Date:   Mon Sep 24 14:21:26 2018 -0700
+
+    Change driver registration log to debug
+
+commit 57de4b753abfddbf933b8f5f8f8e070493bfc9d1
+Author: Dinesh Israni
+Date:   Mon Sep 24 13:49:12 2018 -0700
+
+    Update resync parameter for controller to time.Duration
+
+commit bc319c510b3e4caa24aba77357641ab03c02022a
+Author: Dinesh Israni
+Date:   Wed Sep 19 21:00:00 2018 -0700
+
+    Govendor update
+
+commit f988cd4a8544ab317d4344ce2145e7216b52ae03
+Author: Dinesh Israni
+Date:   Fri Oct 12 14:44:21 2018 -0700
+
+    Update golang version in travis to 1.10.4
+
+commit f52b861d85a5ed8621eecad05eac5e0a26c3cd4a
+Author: Craig Rodrigues
+Date:   Fri Oct 12 14:33:00 2018 -0700
+
+    Add missing argument to logrus.Warnf (#156)
+
+commit 0634241b093712bddd9471ff549a236e0cd0a9ba
+Author: Craig Rodrigues
+Date:   Fri Oct 12 14:16:06 2018 -0700
+
+    Fix import path of golint (#157)
+
+commit 2eedbd8d2b412e5c540f5a1e8c960b789c8bfff5
+Author: Dinesh Israni
+Date:   Mon Oct 8 17:44:09 2018 -0700
+
+    Add auth providers and default client config loading rules
+
+commit 97d7aacf3a7839c48d172852d518b9e6ffc41bf0
+Author: Dinesh Israni
+Date:   Mon Oct 8 17:43:54 2018 -0700
+
+    Govendor update
+
+commit 150bf8f0a540c0e674e9f0928ccf5a2f3aff05cf
+Author: Dinesh Israni
+Date:   Mon Oct 8 15:56:34 2018 -0700
+
+    Don't create namespace if it already exists on remote cluster
+
+commit 6f3f1e4641b0338489cad2245550a8f5a85341f9
+Author: Dinesh Israni
+Date:   Sat Oct 6 00:13:00 2018 -0700
+
+    Check if PV and PVC are owned by driver when migrating
+
+commit 8ef28ffea28fbe2cf4f8bdd67ddefea7d42aa2f3
+Author: Harsh Desai
+Date:   Fri Oct 5 10:10:03 2018 -0700
+
+    Group snapshot should wait till all PVCs are bound (#153)
+
+    Fixes #152
+
+    Signed-off-by: Harsh Desai
+
+commit bb74377883b97e153710cb3825cdcca3b7082a55
+Author: Dinesh Israni
+Date:   Thu Sep 27 19:19:09 2018 -0700
+
+    Start snapshot provisioner in background since it blocks
+
+commit 5bd7347a775eaa2fd102fd4b9780ffb707092276
+Author: Dinesh Israni
+Date:   Fri Sep 28 12:57:16 2018 -0700
+
+    Fix some issues found from Go Report Card
+
+commit ca466f7d1a8ba2ea1a6b1434519e21923d08faad
+Author: Harsh Desai
+Date:   Fri Sep 21 17:14:52 2018 -0700
+
+    Add docs for 3d snaps (#148)
+
+    Fixes #147
+
+    Signed-off-by: Harsh Desai
+
+commit 760b94dad5d121e1d3323098c608cc83053c0c71
+Author: Dinesh Israni
+Date:   Mon Sep 10 14:02:05 2018 -0700
+
+    Start migration controller from cmd
+
+    The controller manager needs to be started first followed by all the
+    other controllers
+
+commit 1e1e73201cf03970edf7fa4683fb4db960452ab9
+Author: Dinesh Israni
+Date:   Mon Sep 10 13:51:35 2018 -0700
+
+    Add controllers for pairing and migration CRDs
+
+    Pairing:
+    - When a pairing is created an API call is made to the storage driver to create
+      a pair.
+      The k8s config is also verified to make sure we can talk to the remote
+      cluster
+    - When a pairing is deleted an API call is made to the storage driver to delete
+      its pairing too
+
+    Migration:
+    - Takes in a cluster pair name to which migration should be done
+    - Users can choose to migrate only volumes from storage driver or volumes +
+      resources (ie PVCs, deployments, statefulsets, etc)
+    - First volumes are migrated from storage driver and then resources are migrated
+      if requested
+    - Stage goes from Initial->Volumes->Application->Final
+    - Pods aren't migrated right now. Need to migrate pods which don't have a
+      replication controller
+    - Service account secrets are not migrated since they belong to a cluster
+    - Config can be set to not scale up applications (ie deployments, statefulsets)
+      on remote cluster. This will add an annotation to store what the replica count
+      was on source cluster
+
+    Events are recorded for various success/failure steps.
+
+commit 63d66122d6617f06333e55a58125e7b0945f2b12
+Author: Dinesh Israni
+Date:   Mon Sep 10 13:50:27 2018 -0700
+
+    Add pairing and migration APIs to volume driver interface
+
+    Add implementation in Portworx driver
+    Return NotImplemented for mock driver
+
+commit 18efa0bd4ffb6805893bccadb74f5ab9e7de74ac
+Author: Dinesh Israni
+Date:   Mon Sep 10 13:48:25 2018 -0700
+
+    Remove integration-test from default target
+
+commit 83f11dabf44de61d01723add05607e8adba843bb
+Author: Dinesh Israni
+Date:   Mon Sep 10 13:46:42 2018 -0700
+
+    Add controller package that can be used to watch for changes
+
+    Modules can register for updates for particular types
+
+commit feb7f1822af28417c8219b12cf239c07521a5c41
+Author: Dinesh Israni
+Date:   Thu Jul 26 14:47:48 2018 -0700
+
+    Govendor update
+
+commit ef54663320aafd3172491d06ac70be615ce1d688
+Author: Dinesh Israni
+Date:   Mon Sep 10 13:45:51 2018 -0700
+
+    Add CRDs for pairing and migration
+
+commit dc214de6633dac7f1145e41158c9596d17d91378
+Author: Dinesh Israni
+Date:   Thu Jul 26 14:47:48 2018 -0700
+
+    Govendor update
+
+commit 5e9308aaa5e98edda465d57515575c5a91be5f39
+Author: Dinesh Israni
+Date:   Thu Sep 13 19:49:36 2018 -0700
+
+    Update health monitor to also use better node matching logic
+
+    Issue #144
+
+commit cd98a94bde5823d8759c29a06245ae5f0e17cad2
+Author: Dinesh Israni
+Date:   Thu Sep 13 18:47:56 2018 -0700
+
+    Update test verification to also look at node IP to match nodes
+
+    Issue #142
+
+commit 11e06757f1e53909e731790289ecc520830dec37
+Author: Dinesh Israni
+Date:   Thu Sep 13 15:45:51 2018 -0700
+
+    When matching nodes check for internalIP in case hostnames are different
+
+    This is for environments like K8s on DC/OS where the kubelet is running
+    in a containerized environment and has a different hostname than the physical
+    node
+
+    Issue #142
+
+commit fd7785c6bff6dc4fa04ad6fd68cd69d7fb259340
+Author: Dinesh Israni
+Date:   Thu Sep 6 17:44:50 2018 -0700
+
+    Add namespace and other info for logs
+
+commit d950d641d9c9ceda6d9e245100b076cd8ef7633a
+Author: Harsh Desai
+Date:   Fri Aug 24 13:50:35 2018 -0700
+
+    Rename storkrule to rule (#138)
+
+    * Rename storkrule to rule
+
+    Closes #128
+
+    Signed-off-by: Harsh Desai
+
+    * update vendor
+
+    Signed-off-by: Harsh Desai
+
+    * openstorage => libopenstorage
+
+    Signed-off-by: Harsh Desai
+
+commit 130c674ed2ee159bf86e770605d1b6c1f5bc6f64
+Author: Dinesh Israni
+Date:   Wed Aug 15 13:11:51 2018 -0700
+
+    Update integration test script to pass in test image
+
+commit 56ffaf4e559fcdd9f2e7ca25abb00fd94528478f
+Author: Dinesh Israni
+Date:   Mon Aug 13 19:34:35 2018 -0700
+
+    Portworx: Update snapshot API call to not retry internally
+
+    Also update cloudsnap status type from openstorage
+
+commit a04b42072a755ef4b35b2049c72af9e81891378c
+Author: Dinesh Israni
+Date:   Mon Aug 13 19:32:50 2018 -0700
+
+    Govendor update
+
+commit ed62abf34c351bfec7c8932c1ea077dc800b642f
+Author: Dinesh Israni
+Date:   Tue Aug 14 14:51:48 2018 -0700
+
+    Add option to integration test script to pick docker image name
+
+commit 2d35a36009d76af02a6ced8e7e61280f3219e127
+Author: Harsh Desai
+Date:   Thu Aug 9 14:40:11 2018 -0700
+
+    Background command fixes (#133)
+
+    * Background command fixes
+
+    1) Don't send the background termination channel if there are no background commands
+    2) Handle situations where there could be more than one background actions and the first one has a single pod
+
+    Fixes #131
+
+    Signed-off-by: Harsh Desai
+
+    * Update mysql 3d snap test to handle one more background command action which runs on single pod
+
+    Signed-off-by: Harsh Desai
+
+    * if any action fails, terminate all background jobs
+
+    Signed-off-by: Harsh Desai
+
+commit fec363db2ab62b8ba23156752c2033d92fa32d06
+Author: Harsh Desai
+Date:   Mon Aug 6 19:59:56 2018 -0700
+
+    override default cmdexecutor for tests (#130)
+
+    * override default cmdexecutor for tests
+
+    Signed-off-by: Harsh Desai
+
+    * rename override annotation
+
+    Signed-off-by: Harsh Desai
+
+commit bcd0e9af2f8028404908192b9bfe1a4b0b6fa480 (tag: cmd-executor-v0.1)
+Author: Harsh Desai
+Date:   Mon Aug 6 18:59:32 2018 -0700
+
+    Add support for 3d snapshots (#118)
+
+    * vendoring changes
+
+    Signed-off-by: Harsh Desai
+
+    * Add support for 3d snapshots
+
+    Signed-off-by: Harsh Desai
+
+    * update sched-ops to get crd api
+
+    Signed-off-by: Harsh Desai
+
+    * addressing review comments
+
+    Signed-off-by: Harsh Desai
+
+    * Update sched ops vendor
+
+    Signed-off-by: Harsh Desai
+
+    * Address review comments
+
+    Signed-off-by: Harsh Desai
+
+    * Just send pvc list to rule api
+
+    Signed-off-by: Harsh Desai
+
+    * Review comments
+
+    Signed-off-by: Harsh Desai
+
+    * validate snapshot rules before starting any snap operations
+
+    Signed-off-by: Harsh Desai
+
+commit 9b460bdf7ae3f30948347aeb130ba36c7dcc2f5e
+Author: Dinesh Israni
+Date:   Fri Jul 27 15:56:30 2018 -0700
+
+    Fix codegen to use correct vendor directory
+
+    Also update generated code
+
+commit 92b40731b33e49a7ab423e635db75ff7b172683d
+Author: Dinesh Israni
+Date:   Fri Jul 27 15:56:55 2018 -0700
+
+    Govendor add k8s code-gen
+
+commit 454eadc891b46e01ce6caafcf707c37728320a0e
+Author: Harsh Desai
+Date:   Fri Jul 20 14:48:19 2018 -0700
+
+    Update sched-ops to fix snapshot validation
+
+    - Updated sched-ops will now continue even if the first validation fails
+
+    Signed-off-by: Harsh Desai
+
+commit c675875fc70ca8271958d60c76fc64fb4f2028fb
+Author: Harsh Desai
+Date:   Thu Jul 19 11:40:52 2018 -0700
+
+    fix group name
+
+    Signed-off-by: Harsh Desai
+
+commit 89616e05ca121f6a24de845909fa6b7e5e114603
+Author: Harsh Desai
+Date:   Wed Jul 18 19:02:14 2018 -0700
+
+    Update group name
+
+    Signed-off-by: Harsh Desai
+
+commit 9d12451f451ee67fce6c74cca8d53bbec04453ec
+Author: Harsh Desai
+Date:   Wed Jul 18 18:54:28 2018 -0700
+
+    workaround to update vendor manually until torpedo and sched-ops are updated
+
+    Signed-off-by: Harsh Desai
+
+commit 7b48bbd62fc92dd302a2ca93c47742d8696baf37
+Author: Harsh Desai
+Date:   Wed Jul 18 18:42:43 2018 -0700
+
+    rename stork.com
+
+    Signed-off-by: Harsh Desai
+
+commit c7d67aaa77e15ff61c364498f4564bb3111217fa
+Author: Dinesh Israni
+Date:   Tue Jul 10 13:10:29 2018 -0700
+
+    Update a test to hit path where node name is different from driver node ID
+
+commit 33e61579416980c5f25ef71218c46ba3504bc6bb
+Author: Harsh Desai
+Date:   Wed Jul 18 13:17:42 2018 -0700
+
+    fix integration tests (#122)
+
+    The tests now explicitly use the get snapshots call and also verify the volumesnapshot data
+
+    Signed-off-by: Harsh Desai
+
+commit 8358aea2b66f8d437aee409462da4b403ebd3fa4
+Author: Harsh Desai
+Date:   Tue Jul 17 18:09:46 2018 -0700
+
+    Check snapshot data before restoring from it (#121)
+
+    * Update sched-ops vendor to get validate api for snapshot data
+
+    Signed-off-by: Harsh Desai
+
+    * Check snapshot data before restoring from it
+    Fixes #120
+
+    Signed-off-by: Harsh Desai
+
+commit 9f16cc0f713dd456d7af585a5d0fa712a6875b02
+Author: Dinesh Israni
+Date:   Mon Jul 16 14:00:10 2018 -0700
+
+    Print node info on failure when checking scheduled node
+
+commit ab453b9161a1446526d8563d00fccfa55c418bf1
+Author: Dinesh Israni
+Date:   Tue Jul 10 14:18:18 2018 -0700
+
+    Add unittests for invalid requests to extender
+
+commit e20ae5a6631a5e3b9629c8d72ff0782bae2d2cbb
+Author: Harsh Desai
+Date:   Mon Jul 9 22:47:22 2018 -0700
+
+    Add CRD for stork rules (#115)
+
+    * Add CRD for stork rules
+
+    Signed-off-by: Harsh Desai
+
+    * Add script to generate CRD and the generated files
+
+    Signed-off-by: Harsh Desai
+
+    * build changes
+
+    Signed-off-by: Harsh Desai
+
+    * changes to crd review comments
+
+    Signed-off-by: Harsh Desai
+
+    * Remove GOVET_PKGS from makefile and just use PKGS
+
+    Signed-off-by: Harsh Desai
+
+    * fix tabs in makefile and remove unused comments in update-codegen.sh
+
+    Signed-off-by: Harsh Desai
+
+commit 051ed128d229cb6e8634d49fe1b884b678f03ff8
+Author: Dinesh Israni
+Date:   Fri Jul 6 17:32:21 2018 -0700
+
+    Extender: Also check node name to match driver nodes
+
+    Issue #112
+
+commit da0cf1214fcfd24715ce9fdd56ba2ea420639231
+Author: Dinesh Israni
+Date:   Thu Jul 5 18:40:26 2018 -0700
+
+    Update travis to push image for all branches (#110)
+
+commit 4e2201c456ca9ebc6f63d739afa28327f47604ec
+Author: Dinesh Israni
+Date:   Tue Jul 3 14:24:33 2018 -0700
+
+    Update specs in integration test instead of deleting and recreating
+
+commit 2e8188d5f413dd93b6bfba83ba992c4c6325f115
+Author: Harsh Desai
+Date:   Mon Jul 2 16:05:36 2018 -0700
+
+    Add an executor CLI for running async commands in pods (#104)
+
+    * Add an executor CLI for running async commands in pods
+
+    Signed-off-by: Harsh Desai
+
+    * review changes
+
+    Signed-off-by: Harsh Desai
+
+    * address review comments
+
+    Signed-off-by: Harsh Desai
+
+    * fix gosimple
+
+    Signed-off-by: Harsh Desai
+
+commit af7f47e692c42246b3c544938dea707a06a4dba9
+Author: Dinesh Israni
+Date:   Thu Jun 28 20:35:50 2018 -0700
+
+    Check node cache with ID and hostname
+
+commit 05dbc099b9d0b2c192d210ddeedd4fc71ed618f0
+Author: Dinesh Israni
+Date:   Wed Jun 27 15:00:29 2018 -0700
+
+    Add gosimple checker
+
+    Also fix issues found by gosimple
+
+commit 47fdb22e42eea1e14e8f28c72dbfa57990a03cde
+Author: Harsh Desai
+Date:   Wed Jun 13 09:08:30 2018 -0700
+
+    don't enforce pv reference for describe of local snapshots
+
+    Fixes #106
+
+    Signed-off-by: Harsh Desai
+
+commit 3a07e70fc5f4c0041a1da96d9fb0685b23216641
+Author: Dinesh Israni
+Date:   Sun Jun 10 19:41:16 2018 -0700
+
+    Add snapshot scale test
+
+commit bd5057d805cf0b503f9d0f9a909fd53c67ae7cfe (tag: v1.1.1)
+Author: Dinesh Israni
+Date:   Thu Jun 7 16:55:09 2018 -0700
+
+    Bump version to 1.1.1
+
+commit 45863a30e1544e9c86c9199be6d1647395b564c1
+Author: Dinesh Israni
+Date:   Thu Jun 7 16:39:00 2018 -0700
+
+    Add support to restore snapshots to different namespaces
+
+    - When creating snapshots users need to provide comma separated regexes
+      with "stork/snapshot-restore-namespaces" annotation to specify which
+      namespaces the snapshot can be restored to
+    - When creating PVC from snapshots, if a snapshot exists in another
+      namespace, the snapshot namespace should be specified with
+      "stork/snapshot-source-namespace" annotation
+
+    Issue #71
+
+commit 321b525e65a433abbc6de8b63d864f73cdbf8ca4
+Author: Dinesh Israni
+Date:   Thu Jun 7 03:19:43 2018 -0700
+
+    Add a cache for node info
+
+    Querying the API server for each node takes too long and times
+    out the requests in a large cluster
+
+    Issue #99
+
+commit 531a1a84f54d29d5b4b5685a32edaaa74286ac26 (tag: v1.1)
+Author: Harsh Desai
+Date:   Thu May 31 17:01:55 2018 -0700
+
+    Update vendor to pull in sched-ops fix for validating snaps (#97)
+
+    Signed-off-by: Harsh Desai
+
+commit 21ecd68f4c40cbf3eb9a2e3784996a227b5de23f
+Author: Harsh Desai
+Date:   Wed May 30 08:37:32 2018 -0700
+
+    retry inspect volumes for group snapshots (#95)
+
+    Signed-off-by: Harsh Desai
+
+commit 744bf489a533d33bd2ab5118d694816039fd33a2
+Author: Harsh Desai
+Date:   Wed May 23 23:27:23 2018 -0700
+
+    Ensure version check for group and cloud snapshot (#94)
+
+    * Ensure version check for group and cloud snapshot
+    Fixes #82
+
+    Signed-off-by: Harsh Desai
+
+    * retry on cluster enumerate
+
+    Signed-off-by: Harsh Desai
+
+commit 6bf00e23b10cb767376cf3db808e2fa33c7d39df
+Author: Harsh Desai
+Date:   Wed May 23 13:34:49 2018 -0700
+
+    update group snapshot annotation keys (#93)
+
+    * update group snapshot annotation keys
+
+    Signed-off-by: Harsh Desai
+
+    * remove unnecessary checks for labels
+
+    Signed-off-by: Harsh Desai
+
+commit 818bbdf3e7ed2ff9ed85f642908a9338df95523f
+Author: Dinesh Israni
+Date:   Mon May 21 18:04:16 2018 -0700
+
+    Make the health-monitor interval configurable
+
+    Default is 120 seconds and minimum is 30 seconds
+
+    Also update version for upcoming release
+
+commit 87cdd97964d3cd618ce3bbd589e35d0c12f7dfaa
+Author: Dinesh Israni
+Date:   Thu May 17 16:54:12 2018 -0700
+
+    If no driver nodes found in filter request return error
+
+    Fixes #89
+
+commit 99afb2aa72c097effaadb11fcd3c4edb14a20641
+Author: Harsh Desai
+Date:   Thu May 17 14:30:21 2018 -0700
+
+    Update vendor to allow error status conditions in failed volumesnapshots (#88)
+
+    Signed-off-by: Harsh Desai
+
+commit 50cc4666702d7cf2bd42e2f0fa8f2d208cdf81f6
+Author: Harsh Desai
+Date:   Wed May 16 13:59:10 2018 -0700
+
+    For group snapshots, include UUID of parent volumesnapshot in child volumesnapshots (#87)
+
+    Fixes #86
+
+    Signed-off-by: Harsh Desai
+
+commit fa7d602b8c0ce1c8630706ee9dc68dedb8609c97
+Author: Dinesh Israni
+Date:   Mon May 14 15:37:06 2018 -0700
+
+    Update vendor for torpedo
+
+commit 1be992282d6da65ac594a5698b9e5a025091e7d4
+Author: Dinesh Israni
+Date:   Thu Apr 26 15:16:40 2018 -0700
+
+    Update docs for initializer
+
+    Issue #54
+
+commit ff8c0a94ed552fda85feb34212dfc7285816abb8
+Author: Dinesh Israni
+Date:   Thu Apr 26 14:54:09 2018 -0700
+
+    Add statefulset to integration test to test initializer
+
+    Issue #54
+
+commit 22a4ae0b978560ef6e91c1ae5fb787c96bc1045f
+Author: Dinesh Israni
+Date:   Thu Mar 29 13:48:11 2018 -0700
+
+    Add initializer spec
+
+    Also update deployment spec with comment to enable initializer
+
+    Issue #54
+
+commit 7ea4eeb0ff2351bcb075a139bb1475e5d82861cc
+Author: Dinesh Israni
+Date:   Fri Mar 2 22:32:04 2018 -0800
+
+    Add initializer to update scheduler name
+
+    Scheduler name is updated only if pod is using a volume by the
+    specified driver
+
+    - Also added cmd args to enable/disable features
+    - Need a different initializer for v1, v1beta1 and v1beta2
+    - Updated test script to enable test with initializer
+    - Updated APIs for k8s 1.10
+
+    Issue #54
+    Issue #9
+
+commit 609a15b3cb4f094e15515a53c72ba5e14c771528
+Author: Dinesh Israni
+Date:   Tue May 8 17:38:38 2018 -0700
+
+    Govendor update k8s libraries to 1.10
+
+commit 72da8b1bb38cf2fbb10d8feea16bd6803ccb600d
+Author: Harsh Desai
+Date:   Sun May 13 14:44:59 2018 -0700
+
+    Fix cloudsnap integration to check only data volumes in use (#84)
+
+    * Fix cloudsnap integration to check only data volumes in use
+
+    Signed-off-by: Harsh Desai
+
+    * extract common func to parse data volumes
+
+    Signed-off-by: Harsh Desai
+
+commit 6a3595d944acdc233713e4d2e25386ae97a4d090
+Author: Harsh Desai
+Date:   Thu May 10 17:01:33 2018 -0700
+
+    verify scheduled node only for mysql-data-1 (#83)
+
+    Signed-off-by: Harsh Desai
+
+commit b355c25df712f4b03bacd0936aea6a9eb2eecf8e
+Author: Harsh Desai
+Date:   Thu May 10 13:59:52 2018 -0700
+
+    Add support for cloud and group snapshots (#78)
+
+    * vendoring changes
+
+    Signed-off-by: Harsh Desai
+
+    * Add support for cloud and group snapshots
+    Fixes #61
+    Fixes #55
+
+    Signed-off-by: Harsh Desai
+
+    * fix travis build
+
+    Signed-off-by: Harsh Desai
+
+    * Address review comments. Key changes
+    1) Wait indefinitely for cloudsnap completion
+    2) Revert group snaps on partial completions
+    3) cloudsnap restore support
+    4) add cloudsnap test
+
+    Signed-off-by: Harsh Desai
+
+    * Review comments
+
+    Signed-off-by: Harsh Desai
+
+    * vendor update for cloudsnap operation type
+
+    Signed-off-by: Harsh Desai
+
+    * review comments
+
+    Signed-off-by: Harsh Desai
+
+    * Add missing secrets vendor
+
+    Signed-off-by: Harsh Desai
+
+commit db0f19df337c3c53ed4b76808f34c00f39c2ed53
+Author: Dinesh Israni
+Date:   Thu May 3 12:12:43 2018 -0700
+
+    Use correct volume name for snapshot in torpedo
+
+commit e77632d4719cc9cc39bc528f88be37165bdd0f92
+Author: Dinesh Israni
+Date:   Wed May 2 16:48:46 2018 -0700
+
+    Govendor update for torpedo and sched-ops
+
+commit fca8c002c358645ccecebf10ee528078da8525ba
+Author: Dinesh Israni
+Date:   Wed May 2 16:22:32 2018 -0700
+
+    Portworx: Return NotImplemented for FindSnapshot
+
+commit 5367db20ec559509336f48ce583608b5b8cd8d37
+Author: Dinesh Israni
+Date:   Tue May 1 13:11:11 2018 -0700
+
+    Update stork specs to remove predicate not present in 1.9.x
+
+commit 779a017c68ee9e79f2fcfe540255f61e669660c8
+Author: Dinesh Israni
+Date:   Tue May 1 12:40:18 2018 -0700
+
+    Update SnapshotCreate API for Portworx driver
+
+    Issue #55
+    Issue #61
+
+commit 1e9aa635ba9b1310c8e176f4530aa52fff30edef
+Author: Dinesh Israni
+Date:   Tue May 1 12:40:03 2018 -0700
+
+    Govendor update for external-storage
+
+    Issue #55
+    Issue #61
+    Issue #42
+    Issue #43
+    Issue #69
+
+commit 2bbd7d3a63c5a9f0278230c9e4673ecd73bd99d2
+Author: Harsh Desai
+Date:   Tue Apr 24 12:37:18 2018 -0700
+
+    Fix docker tag in build instructions (#70)
+
+commit 1ed4f9f6a31faff5901ede6350de2a3d87268d20
+Author: Dinesh Israni
+Date:   Fri Apr 20 13:27:03 2018 -0700
+
+    Portworx: Use snapshot UID for snapshot name to make it unique
+
+    Also add snapshot name and namespace as labels when creating the
+    snapshot
+
+    Updated integration test
+
+    Issue #67
+
+commit 39ca53c69d3215121a5d3b8f99cf571e5aea7c65
+Author: Dinesh Israni
+Date:   Thu Apr 19 13:22:19 2018 -0700
+
+    Pick clone volume correctly in test
+
+commit a3c786b41a0de2a4cd2482aeb08063c321f95f84
+Author: Dinesh Israni
+Date:   Wed Apr 18 15:23:31 2018 -0700
+
+    Update snapshot test to also expect snapshot volume
+
+commit a8a669ca260dd2f8786ff264b52c714ed5f981f5
+Author: Dinesh Israni
+Date:   Tue Apr 17 18:57:31 2018 -0700
+
+    Update torpedo API calls for stopping driver
+
+commit 3ccb35122fcf746b895019bc5ca2c106e35179e5
+Author: Dinesh Israni
+Date:   Tue Apr 17 18:54:21 2018 -0700
+
+    Govendor update sched-ops, torpedo and dependencies
+
+commit 522688da2405233ff21317985407396af8ddfa4b
+Author: Dinesh Israni
+Date:   Tue Apr 17 23:10:16 2018 +0000
+
+    Update ssh username and password from env variables
+
+commit 99b6266e3eee5ebf8c7b5e527ad620cb66b703f6
+Author: Dinesh Israni
+Date:   Tue Apr 17 23:09:24 2018 +0000
+
+    Add storageclass permissions for scheduler, required for 1.10
+
+commit 733dcbae0aff13f530890a5373d2a9cbb2e4d23b
+Author: Dinesh Israni
+Date:   Fri Feb 16 22:59:25 2018 -0800
+
+    Add support for node locality awareness when prioritizing nodes
+
+    Also updated Portworx driver to return rack/zone/region info
+
+    Issue #6
+
+commit 3510c416649cee4426cc8ad9ca07090363fff3c3
+Author: Dinesh Israni
+Date:   Fri Feb 16 21:07:25 2018 -0800
+
+    Replace k8sutils with sched-ops package
+
+    Issue #17
+
+commit 247723c76f71240c5130f5f9529463b6bff58bb3
+Author: Dinesh Israni
+Date:   Fri Feb 16 19:59:53 2018 -0800
+
+    Govendor update for torpedo and sched-ops
+
+commit 577ca9c050bff35bda2e75466065175fd596128d
+Author: Dinesh Israni
+Date:   Fri Apr 6 14:58:36 2018 -0700
+
+    Use correct path to replace stork tag
+
+commit b9a2aafafcab53d9eb4727f662b75fbb771c6414
+Author: Dinesh Israni
+Date:   Thu Apr 5 13:37:59 2018 -0700
+
+    Check PVC first for provisioner
+
+    Issue #49
+
+commit 84ba7e8db90f5e4d6998b36f8de469517458d4c0
+Author: Dinesh Israni
+Date:   Tue Apr 3 12:55:04 2018 -0700
+
+    Don't check source PV when restoring from snapshot
+
+    The original PV could have been deleted.
+
+    Issue #57
+
+commit 5dee852495a17a46ed422469f1aaa7c0fbc3de98
+Author: Piyush Nimbalkar
+Date:   Fri Mar 30 15:13:15 2018 -0700
+
+    Add pvc namespace label to the volume on restoring snapshot
+
+commit 95a05becbadb91a9abfa90203f20a09d35561299
+Author: Piyush Nimbalkar
+Date:   Fri Mar 30 15:06:45 2018 -0700
+
+    Add pvc name label to the restored snapshot
+
+commit 473c8bf68c3706dfc9f21cf4480536ecd252b4ee
+Author: Dinesh Israni
+Date:   Wed Mar 28 19:10:46 2018 -0700
+
+    Fail if any unit test hits errors
+
+commit 27e4f4f90aa73a413fac3899dd730bd6a6730ad6
+Author: Dinesh Israni
+Date:   Sun Mar 4 15:16:36 2018 -0800
+
+    Fix error check in extender prioritize request
+
+commit d39ecfde1f5c56fc6f647efc476a3c96b776a00c
+Author: Dinesh Israni
+Date:   Mon Mar 26 20:43:09 2018 -0700
+
+    Portworx driver: Check other sources for provisioner
+
+    Issue #49
+
+commit 54bc88d83d811950d16204c985bc8164627498d9
+Author: Dinesh Israni
+Date:   Tue Mar 13 21:19:45 2018 -0700
+
+    Add codecoverage and badges
+
+commit 87d0c66d63b4ef0095a30248e3c8b9ff9e3cbf6e (tag: v1.0.3)
+Author: Dinesh Israni
+Date:   Mon Mar 19 18:31:13 2018 -0700
+
+    Bump version to 1.0.3
+
+commit 47380187d3b595875d4a060a82782fec69fca439
+Author: Dinesh Israni
+Date:   Mon Mar 19 15:41:02 2018 -0700
+
+    Add some metadata to docker container
+
+commit be2c099b5a38f2290071fbb79d6e8901b65f6bb7
+Author: Dinesh Israni
+Date:   Sat Mar 17 23:36:29 2018 -0700
+
+    Portworx driver: Check for source not being nil during inspect
+
+    Issue #45
+
+commit 21dcb685ae5bd3401467a4568fec165fcf7d689e
+Author: Dinesh Israni
+Date:   Tue Mar 13 21:41:47 2018 -0700
+
+    Create CODE_OF_CONDUCT.md
+
+commit 3f9ee2d62bda5767692f9e11de7ae3d41ae617c4
+Author: Dinesh Israni
+Date:   Fri Mar 2 23:01:38 2018 +0000
+
+    Add instructions to use stork with default scheduler
+
+    Issue #38
+
+commit 3a32d6f81bc42719ed29811af06e0346ed750cb7
+Author: Dinesh Israni
+Date:   Thu Mar 1 16:56:05 2018 -0800
+
+    Update spec to point to v1.0.2
+
+commit b535e8bbfe99ce34ae564c2cad493689435f9c05 (tag: v1.0.2)
+Author: Dinesh Israni
+Date:   Thu Mar 1 16:29:58 2018 -0800
+
+    Bump version to 1.0.2
+
+commit 749c9beb5a63b208e3c35f1f46f9f5d79ce5d2ab
+Author: Dinesh Israni
+Date:   Wed Feb 28 13:37:18 2018 -0800
+
+    Update test script to use master image tag
+
+commit 7206fe2657c9eba88edf2e61870b7b120e7f7d53
+Author: Dinesh Israni
+Date:   Fri Feb 23 18:28:01 2018 -0800
+
+    Also check for short hostnames in filter request
+
+    Issue #30
+
+commit 3818bbdd2f19d0472049642ac5b798d40f70a303
+Author: Dinesh Israni
+Date:   Fri Feb 23 15:13:10 2018 -0800
+
+    Govendor add new dependencies
+
+commit c60ddf5412e0c7e3d33e8428fd8206a750bfc292
+Author: Dinesh Israni
+Date:   Fri Feb 23 14:52:51 2018 -0800
+
+    Update sync cache for snapshot controller
+
+    Issue #29
+
+commit 9bf8b5a92886911252c6209714c5d0a00be60895
+Author: Dinesh Israni
+Date:   Fri Feb 23 15:56:34 2018 -0800
+
+    Check for short hostnames in extender by checking prefix
+
+    Issue #30
+
+commit 6250ee576b8948facc827287b48e9777c5a57da4
+Author: Dinesh Israni
+Date:   Fri Feb 16 18:17:53 2018 -0800
+
+    Move the container to be based on atomic
+
+    Issue #27
+
+commit c09173e8fe495e3dff9157c7234220e56c42c4b7
+Author: Dinesh Israni
+Date:   Thu Feb 15 13:22:01 2018 -0800
+
+    Update README.md
+
+    Fix formatting for snapshot spec example
+
+commit 80f737f2c9ae6e1795966afff3adf155fb8501e9
+Author: Dinesh Israni
+Date:   Mon Feb 12 12:14:30 2018 -0800
+
+    Add note about predicates and priorities in spec
+
+commit 0fce6aec6759642f76b1a0938336d6a6b75dd115
+Author: Dinesh Israni
+Date:   Mon Feb 12 11:38:56 2018 -0800
+
+    Add configmap namespace for scheduler deployment
+
+commit 44aab7c9a6928f565d11f6c97d93cdd1bd4ab099 (tag: v1.0.1)
+Author: Dinesh Israni
+Date:   Mon Feb 5 14:44:13 2018 -0800
+
+    Update version to 1.0.1
+
+commit 89b6bb1effb25fd80f11b651792bdfc4ca30835a
+Author: Dinesh Israni
+Date:   Thu Feb 1 13:23:57 2018 -0800
+
+    Convert hostname to lower case to match kubernetes hostnames
+
+    Issue #24
+
+commit f49d2a57d24c630efd340d6289b76836e8d73143
+Author: jose nazario
+Date:   Wed Jan 31 15:18:36 2018 -0500
+
+    spelling fixes, no content changes (#23)
+
+commit deebae19e68fa807a01b8d5c3eba81d722c78223 (tag: v1.0, tag: V1.0)
+Author: Dinesh Israni
+Date:   Thu Jan 18 12:52:21 2018 -0800
+
+    Update specs to remove unnecessary fields
+
+commit 8a11127e781701ae0d3950c6c9ddcb504c860a85
+Author: Dinesh Israni
+Date:   Tue Jan 9 14:16:58 2018 -0800
+
+    Update README.md
+
+commit cc3ecd9e25c3d43a96562a1c028f2f3fb0429cf3
+Author: Dinesh Israni
+Date:   Mon Jan 15 02:47:57 2018 +0400
+
+    Update stork spec to use v1.0
+
+commit 343e2816f902947ce993eec80fc1f5dc71261429
+Author: Dinesh Israni
+Date:   Mon Jan 15 02:39:50 2018 +0400
+
+    Update version to 1.0
+
+commit 7a1d573b6113db1874e52d7c9c25fd7aaa6ec4ee (tag: v0.3.1)
+Author: Gou Rao
+Date:   Wed Jan 10 17:36:19 2018 -0800
+
+    Update README.md
+
+    fix typo
+
+commit e791288f4f721e58bdaa9d43375b9f363eaa570b
+Author: Dinesh Israni
+Date:   Tue Jan 9 19:07:15 2018 -0800
+
+    Wait for integration test to come out of Running state at the end
+
+commit 7b38516675a0cec2ace60c2fe0cfe2733086b008
+Author: Dinesh Israni
+Date:   Mon Jan 8 21:10:41 2018 -0800
+
+    Check for correct status for integration test
+
+commit 4a5fe058f0f9e310a47c65e1f7b3e6194bbbb697
+Author: Dinesh Israni
+Date:   Mon Jan 8 20:31:49 2018 -0800
+
+    Fix for using hostname instead of driver node ID
+
+    The node IDs returned in volume info might not match the hostname
+
+commit 9da0fc8bd4d5cfd84d4c27fae4dd7773b842dd6a
+Author: Dinesh Israni
+Date:   Mon Jan 8 20:28:53 2018 -0800
+
+    Govendor update torpedo drivers
+
+commit 031b4c7f59faabbba6e4a17a772f320ddad8bde8
+Author: Dinesh Israni
+Date:   Mon Jan 8 15:11:26 2018 -0800
+
+    Update stork spec to use v0.3.1
+
+commit 9d0dd36ae1557e6d66c716b99f1df12d52a68a58
+Author: Dinesh Israni
+Date:   Mon Jan 8 15:09:01 2018 -0800
+
+    Make test-deploy.sh executable
+
+commit 05c6ddfef37f14395dc76e636245bbd618844a43
+Author: Dinesh Israni
+Date:   Mon Jan 8 14:54:11 2018 -0800
+
+    Update version to 0.3.1
+
+commit f3b978d3c6c036805d1f91ac7437407df5bed418
+Author: Dinesh Israni
+Date:   Mon Jan 8 13:39:35 2018 -0800
+
+    Add script to deploy and run stork integration tests
+
+commit 2296144226ae8b58ce8a70d92d186f4f7ca881c6
+Author: Dinesh Israni
+Date:   Mon Jan 8 10:55:44 2018 -0800
+
+    Remove stork-snapshot storageclass from integration test spec
+
+commit 458a84f8a006a70d8863fd1798a4b4cefcfbdfdb
+Author: Dinesh Israni
+Date:   Mon Jan 8 10:53:06 2018 -0800
+
+    Add stork-snapshot storageclass to spec
+
+commit 668470cf5d79cc9c5a8a3ae0e8baf4e699d0724e
+Author: hr1sh1kesh
+Date:   Wed Jan 3 16:06:23 2018 +0530
+
+    Fix RBAC permissions for stork-account on ConfigMaps
+
+commit fec13849080a9a3875f92ae2c5de5400414b30ae
+Author: Dinesh Israni
+Date:   Thu Dec 28 13:32:30 2017 +0400
+
+    Add specific permissions for stork and the scheduler
+
+commit f30d98f39219176e28b70cbcf781704a7c3b3382
+Author: Dinesh Israni
+Date:   Thu Dec 28 13:22:47 2017 +0400
+
+    Start extender on all replicas
+
+commit d82e6c377ecd08112c4ef26232d8da9c6ca2610d
+Author: Dinesh Israni
+Date:   Wed Dec 20 06:17:10 2017 +0100
Dec 20 06:17:10 2017 +0100 + + Update stork-scheduler.yaml + +commit 53f81d8487b86f4228e5717b182aefe1ac888e06 +Author: Dinesh Israni +Date: Tue Dec 19 21:05:45 2017 -0800 + + Update stork-scheduler.yaml + + Update spec to not run more than one pod per node + +commit 7fd4ae356261d4e61957a4cd213c81e3023ee06c +Author: Dinesh Israni +Date: Wed Dec 20 05:59:14 2017 +0100 + + Update stork-deployment.yaml + + Update stork spec to not run more than one pod per node + +commit f3cafb73a438a772adacd99a282a1425dd61298e +Author: Dinesh Israni +Date: Fri Dec 15 18:43:18 2017 -0800 + + Update stork to 0.3 in spec and cmd + +commit cda37a5847f47c04f7d765165f7b1eb43d6b947d +Author: Dinesh Israni +Date: Fri Dec 15 18:42:25 2017 -0800 + + Update README.md + +commit d65bdafa61bce7e2008fa88aeae0589f4c33f73a +Author: Dinesh Israni +Date: Fri Dec 15 18:40:11 2017 -0800 + + Update stork deploment spec to run 3 replicas + + Issue #10 + +commit 5267f64ded2d2622ea6b9f230b0d8f29e0a28b65 +Author: Dinesh Israni +Date: Fri Dec 15 18:31:36 2017 -0800 + + Wait 1 minute before deleting pod in extender test + +commit 9cab0ebc3e66b4c7216e10815e7dbb25380006cf +Author: Dinesh Israni +Date: Thu Dec 14 18:38:41 2017 -0800 + + Add support for leader election when starting up + + Leader election is enabled by default + Can specify lock object name and namespace as cmd args + + Issue #10 + +commit bae23895e95f960cce3ca496e506ebde99aefd4f +Author: Dinesh Israni +Date: Thu Dec 14 18:37:54 2017 -0800 + + Govendor update + +commit 00b128afdb05c21d9e7350fffbfd8d5216c24120 +Author: Dinesh Israni +Date: Wed Dec 13 19:37:43 2017 -0800 + + Add basic integration tests for snapshot + + Issue #8 + +commit 572c1b1ed4694899e37e4a8ba9a2be072238d86f +Author: Dinesh Israni +Date: Wed Dec 13 19:34:39 2017 -0800 + + Govendor update + +commit 1e50be0d9fc379cb83b901498114eef1bec77c63 +Author: Dinesh Israni +Date: Wed Dec 6 13:55:43 2017 -0800 + + Fix error check when stopping extender server + +commit da800c2b6a4e57ea291adf02f51cb7a4e596d1bf +Author: Dinesh Israni +Date: Tue Dec 5 17:39:17 2017 -0800 + + Add timeout context for shutting down extender + +commit 4f4bbd0a2a94a0757486433d4944845be8081392 +Author: Dinesh Israni +Date: Tue Dec 5 16:58:11 2017 -0800 + + Replace Sirupsen with sirupsen in vendored packages + +commit f45ce061cd1b58b11f9b88515c69c0c469be9e0a +Author: Dinesh Israni +Date: Tue Dec 5 16:46:54 2017 -0800 + + Add support for snapshots + + Starts a snapshot controller as well as a provisioner to create PVC from + snapshots. 
+ + Based on the snapshot work being done in kubernetes-incubator + + Issue #8 + +commit 7e326f356bb5310fa2cd28b442148c2e19026c9a +Author: Dinesh Israni +Date: Wed Dec 13 19:38:41 2017 -0800 + + Update volume driver Init() to take interface parameter + +commit d2346ec8c40f86608a5bd81624f6dba445bc781e +Author: Dinesh Israni +Date: Thu Dec 14 00:03:13 2017 -0800 + + Add VolumeName and ParentID in volumeInfo + +commit cd21e73f31bd38fa5f13b5571abc6f1b107188c6 +Author: Dinesh Israni +Date: Tue Dec 5 16:45:25 2017 -0800 + + Implement snapshot plugin for Portworx driver + + Issue #8 + +commit 1e1bf2b451db651b7b3387187ef4c3da567f2e9a +Author: Dinesh Israni +Date: Tue Dec 5 16:31:34 2017 -0800 + + Govendor update + +commit 7631942690491240ea8f5133c5b819e2570d2e17 +Author: Dinesh Israni +Date: Thu Dec 14 00:21:01 2017 -0800 + + Update stork-deployment to use v0.2 + +commit b67596ad70a23ad757da952c95fbef04e63447fc +Author: Dinesh Israni +Date: Mon Dec 11 21:48:31 2017 -0800 + + Update README.md + + Updated for HA scheduler + +commit 4be84d4c17321f80aa0e686b7169e86e5a4ab703 +Author: Dinesh Israni +Date: Mon Dec 11 21:45:58 2017 -0800 + + Delete stork-daemonset.yaml + +commit 89e4a2133f1ff138c51490b118aeb515b40cf0e0 +Author: Dinesh Israni +Date: Mon Dec 11 21:45:12 2017 -0800 + + Update stork-scheduler.yaml + + Add lock object name for leader election + +commit 5ca3a853f435c9ed4e4cad5a6056ce02275355f6 (tag: v0.2) +Author: Dinesh Israni +Date: Tue Dec 5 17:24:31 2017 -0800 + + Update version to 0.2 + +commit fd2884ac9b3f91ba5c034ea404ce7c33ecadcb33 +Author: Dinesh Israni +Date: Tue Nov 28 17:24:31 2017 -0800 + + Update ISSUE_TEMPLATE.md + +commit a3ca5200d0581b3347021a55d6aa16d3ca1fd649 +Author: Dinesh Israni +Date: Mon Nov 27 18:58:25 2017 -0800 + + Fix typo + +commit e2a38f867f2a086bc5fe4343ae6dc6b8e7157228 +Author: Dinesh Israni +Date: Mon Nov 27 14:20:45 2017 -0800 + + Update gitignore + +commit a321971f4d3751c90dbcf4841221e881c41a59ba +Author: Dinesh Israni +Date: Tue Nov 21 16:53:35 2017 -0800 + + Update travis to build integration tests + +commit 0448d5944a810d4fe44557a9a7492a35d4bd01cd +Author: Dinesh Israni +Date: Tue Nov 21 16:27:45 2017 -0800 + + Replace Sirupsen with sirupsen packages + +commit 895e879d9762bbf7a26a5bb494217ab53e300d02 +Author: Dinesh Israni +Date: Tue Nov 21 15:45:14 2017 -0800 + + Updates for Portworx driver + + - Fix typo in error struct + - Add check for length of volumes received from Inspect() + +commit 1c32343e58c596de2df147d197653e44a3d9b95b +Author: Dinesh Israni +Date: Mon Nov 20 19:26:09 2017 -0800 + + Govendor update + +commit 6e2fd3e1f89b6f686498f0bdcf8f11beda01a3bf +Author: Dinesh Israni +Date: Mon Nov 20 19:24:20 2017 -0800 + + Integration tests for extender and health monitor + + - Using torpedo APIs to start pods + - Verifying filter and prioritize behavior + - Verifying pods are deleted and relocated if driver fails on a node + + Issue #2 + +commit a59ddcdd0966c0eb6bc724dd2154351c782d4479 +Author: Dinesh Israni +Date: Mon Nov 20 17:11:56 2017 -0800 + + Update for unit tests + + - Add tags for unit test + - Fix noDriverVolumeTest to use a non-Mock volume in pod + +commit 2cd6ed48aaee6e6078d2926322bb80c7a53bc3ee +Author: Dinesh Israni +Date: Mon Nov 13 18:22:55 2017 -0800 + + Add initial monitor to check for health of driver on nodes + + If driver on a node is unhealthy, delete all pods using the driver + on that node + + Issue #3 + +commit dcf1ddbbbc2e830b7d5a8386e539814fb3c9ff37 +Author: Dinesh Israni +Date: Mon Nov 13 18:19:12 2017 -0800 + + Update k8sutils 
to get all pods and delete a pod + +commit 50940c39dbe886e52ac19ebb762b8737c2c0fbbe +Author: Dinesh Israni +Date: Mon Nov 13 18:18:30 2017 -0800 + + Add Start and Stop for extender + +commit 862ba2151c41dd651456bd4c09d9c4621e2912e1 +Author: Dinesh Israni +Date: Mon Nov 27 17:12:10 2017 -0800 + + Update README.md + +commit ed14a426b4673c011b05b890ce6fde765f6b17f0 +Author: Dinesh Israni +Date: Mon Nov 27 17:11:18 2017 -0800 + + Update README.md + +commit 3bcd6d3b9060f6a9520f92fa9ffd5d498ffcba1f +Author: Dinesh Israni +Date: Mon Nov 27 17:09:48 2017 -0800 + + Update stork-deployment.yaml + +commit 339e586b376806adb504f762a577fbe154b0df34 +Author: Dinesh Israni +Date: Mon Nov 27 15:10:18 2017 -0800 + + Update README.md + +commit e636512d0fe569590b8b918887b11754604da84c +Author: Dinesh Israni +Date: Mon Nov 27 15:09:49 2017 -0800 + + Create CONTRIBUTING.md + +commit 926cd3313513d51c7f03c9d2077c7150d8790f59 +Author: Dinesh Israni +Date: Mon Nov 27 14:57:15 2017 -0800 + + Create ISSUE_TEMPLATE.md + + Add issue template based on K8s template + +commit 586da254ebc883777d298d017a1d294711d4524d +Author: Dinesh Israni +Date: Tue Nov 7 15:20:24 2017 -0800 + + Update README.md + + Add some badges + +commit 45137f4cb917a18aab93ca55d714ddeb78c9dee3 +Author: Dinesh Israni +Date: Wed Nov 1 13:18:51 2017 -0700 + + Update .travis.yml + +commit 01ea5a696805de4ceb363e7c976cf38289daa317 +Author: Dinesh Israni +Date: Wed Nov 1 12:30:29 2017 -0700 + + Update .travis.yml + + Push to latest tag if build and tests pass + +commit 294eab2c52179948ad081cc710800b02b67c017c +Author: Dinesh Israni +Date: Tue Oct 31 15:51:33 2017 -0700 + + Update travis to run unit tests + +commit d339c974c0cd63c1689fbb040cae25ffb59d668b +Author: Dinesh Israni +Date: Tue Oct 31 15:40:48 2017 -0700 + + Add UTs for scheduler extender + +commit f092a13879dfca76bdd8a55b461b0b80682dd1f1 +Author: Dinesh Israni +Date: Tue Oct 31 15:39:55 2017 -0700 + + Remove unused method from k8sutils + +commit 8310f288d30ae70d0ecd4166ebf366c277913949 +Author: Dinesh Israni +Date: Tue Oct 31 15:39:07 2017 -0700 + + Assign default scores to nodes even if there are no volumes for a driver + +commit 772e14a2b0ba4fcccd713d9aaf13ed6b74d3c382 +Author: Dinesh Israni +Date: Tue Oct 31 15:38:30 2017 -0700 + + Add Mock volume driver + +commit e24429bf087431d87f4dcedd2a15aa39c83e6992 +Author: Dinesh Israni +Date: Mon Oct 16 15:56:43 2017 -0700 + + Update README.md + +commit fdc62dbe2db6e397c30b66cd4d26a8bd3399a4e8 +Author: Dinesh Israni +Date: Mon Oct 16 15:50:22 2017 -0700 + + Update deployment instructions in README + +commit 4c9c1afd4365e8a1ac819d43bd39157fcd709ad7 +Author: Dinesh Israni +Date: Mon Oct 16 14:27:00 2017 -0700 + + Update specs + + - Add separate specs for deployment and daemonset + - Add example mysql pod spec + +commit d66d5457ffc4723f46d900fb4abf468a9d91c0fd +Author: Dinesh Israni +Date: Sun Oct 15 23:23:25 2017 -0700 + + Update stork spec + + - Fix typo in externder URL in config map + - Set hostNetwork to false for stork pod + +commit 0e4d120c7af1a85a19dc3e2491722c4b12011da9 +Author: Dinesh Israni +Date: Sun Oct 15 19:39:05 2017 -0700 + + Update stork scheduler pod name in spec + +commit 0c4408a187d4fb5e61ff685dae2387a5be2d1d63 +Author: Dinesh Israni +Date: Sun Oct 15 19:32:24 2017 -0700 + + Move spec files + + - Added spec for scheduler which can be used if users don't want to change + the config for their default scheduler + - Added config map in stork spec which can be used by any scheduler + +commit c2047e86796a7bd8d823235275c326977763857f 
+Author: Dinesh Israni +Date: Sat Oct 14 14:16:51 2017 -0700 + + Add pretest to travis build + +commit 531d0988f3692253a81d73110c4c06fbfe37c69b +Author: Dinesh Israni +Date: Sat Oct 14 14:15:44 2017 -0700 + + Add error checks + +commit 848c9f3d626a7c5bced0e9ca41d83188db6df935 +Author: Dinesh Israni +Date: Sat Oct 14 13:54:36 2017 -0700 + + Update deploy target in Makefile to only push docker image + +commit 8b24e2ed06c7e40d748f859505a23ec10e81ff1e +Author: Dinesh Israni +Date: Sat Oct 14 13:48:10 2017 -0700 + + Update .travis.yml + +commit 09dd095a127f7b0bd7984f7a90cd4cdf706fb3e9 +Author: Dinesh Israni +Date: Fri Oct 13 20:47:34 2017 -0700 + + Update .travis.yml + +commit 7f795951fae21ce42cc3ec0b204c8780145cc895 +Author: Dinesh Israni +Date: Fri Oct 13 20:43:15 2017 -0700 + + Update .travis.yml + +commit d04c3db91dba90da85dd3a0effc34b2bc4611466 +Author: Dinesh Israni +Date: Fri Oct 13 20:23:13 2017 -0700 + + Add travis build + +commit 4674dc3ae5c80df99ecc76f08e0567475fc0e75b +Author: Dinesh Israni +Date: Fri Oct 13 19:43:23 2017 -0700 + + Make the priority scores into constants + +commit b2214887a6ec8be9ff192438a50d0892845078e0 +Author: Dinesh Israni +Date: Fri Oct 13 19:42:52 2017 -0700 + + Update logs to print pod information to help with debugging + +commit 9d1faa6df04f25689786b36d09371a08a4946218 +Author: Dinesh Israni +Date: Fri Oct 13 19:06:54 2017 -0700 + + Replace image name in pod spec with a tag + +commit 98ffffd62f216b001262ecda5911a3641abe5584 +Author: Dinesh Israni +Date: Fri Oct 13 18:58:11 2017 -0700 + + Initial commit for a scheduler extender + + - Serves 'filter' and 'prioritize' requests + - Filters out nodes where the driver is not running or unhealthy + - Prioritizes nodes where the volume has been allocated + - For each volume on a node, the score is bumped by 100 + - Any node that doesn't have data is given a score of 10 so that it + isn't ignored by the scheduler + +commit d6118dca35066c915a59e23e71b49574d277f510 +Author: Dinesh Israni +Date: Tue Oct 3 15:29:05 2017 -0700 + + Govendor update + +commit e1ef31eedeeacdffa8760aa88fc2bf2c1764a6f5 +Author: Dinesh Israni +Date: Tue Sep 26 22:31:51 2017 -0700 + + Update readme + +commit a9a2a9ec1e18f8063b0940bf42cd7d2bd09667d6 +Author: Gou Rao +Date: Tue Sep 26 11:21:26 2017 -0700 + + Update README.md + +commit af7dfd032898990740359501bc9ed91becfbe896 +Author: Gou Rao +Date: Tue Sep 26 17:02:59 2017 +0000 + + new logo + +commit ff9853640cb11f8c8c374ec21d807fd33003f288 +Author: Gou Rao +Date: Fri Sep 22 18:01:22 2017 -0700 + + Update README.md + +commit c896d9dcd56f6456f972e25015713c4b6c821685 +Author: Gou Rao +Date: Thu Sep 7 18:45:28 2017 +0000 + + added a vendor dir + +commit b1931f336c02cd89092fce1bb86047d970a3084c +Author: Gou Rao +Date: Wed Sep 6 20:24:55 2017 +0000 + + Add pod spec example to readme + +commit 4021ab76b1cba1c0f66eef14d75614dcf4b5aa15 +Author: Gou Rao +Date: Wed Sep 6 19:51:20 2017 +0000 + + Add pod spec example to readme + +commit e7988e32def210ceaa6d9ce8af831a01e3093cdb +Author: Gou Rao +Date: Wed Sep 6 19:44:06 2017 +0000 + + Add pod spec example to readme + +commit 87420d6c0815d80d29f7ac4082f1ae66bee76cb3 +Author: Gou Rao +Date: Wed Sep 6 06:00:09 2017 +0000 + + redo logo + +commit eecf1be14776e24ee21eec52bda59f981c84734d +Author: Gou Rao +Date: Wed Sep 6 05:47:02 2017 +0000 + + redo logo + +commit b134fbc6bb06ca291861e11bbb29934b30ce9c0b +Author: Gou Rao +Date: Wed Sep 6 05:44:27 2017 +0000 + + redo logo + +commit 17dffab1dab6e9b2b21fe686c1a7de4a1453d3ee +Author: Gou Rao +Date: Wed Sep 6 05:42:00 
+commit da0d9394feefac1e44931a095f0f5b4d791a1959
+Author: Gou Rao
+Date:   Wed Sep 6 05:40:58 2017 +0000
+
+    redo logo
+
+commit 96af825a793cc0d0ebcb673b77a8ac6b36353e13
+Author: Gou Rao
+Date:   Wed Sep 6 05:38:20 2017 +0000
+
+    redo logo
+
+commit 083d5c68ff27d00db39ff9111e4b6e7d52ebb320
+Author: Gou Rao
+Date:   Wed Sep 6 05:37:45 2017 +0000
+
+    redo logo
+
+commit 9b93c3b2624cbd2b5a3b0556aafe436fb4ccb8e8
+Author: Gou Rao
+Date:   Wed Sep 6 05:35:59 2017 +0000
+
+    redo logo
+
+commit a1f08c0ca06eb2f0a5d1d18150f7a966a51268c0
+Author: Gou Rao
+Date:   Wed Sep 6 05:12:09 2017 +0000
+
+    initial skeleton framework
+
+commit 6df9f0dec16d433cef9ff28465cfcd1660844cae
+Author: Gou Rao
+Date:   Wed Sep 6 05:11:38 2017 +0000
+
+    initial skeleton framework
+
+commit e1f2f1578f1b02aca9dc0770fee9632788ee6344
+Author: venkatpx
+Date:   Tue Sep 5 21:20:51 2017 -0700
+
+    Initial commit
diff --git a/pkg/applicationmanager/controllers/applicationbackup.go b/pkg/applicationmanager/controllers/applicationbackup.go
index 7e5c9710d1..ea20a2a48f 100644
--- a/pkg/applicationmanager/controllers/applicationbackup.go
+++ b/pkg/applicationmanager/controllers/applicationbackup.go
@@ -306,13 +306,19 @@ func (a *ApplicationBackupController) handle(ctx context.Context, backup *stork_
 	}
 
 	// Try to create the backupLocation path, just log error if it fails
-	err := a.createBackupLocationPath(backup)
+	backupLocation, err := storkops.Instance().GetBackupLocation(backup.Spec.BackupLocation, backup.Namespace)
 	if err != nil {
-		log.ApplicationBackupLog(backup).Errorf(err.Error())
-		a.recorder.Event(backup,
-			v1.EventTypeWarning,
-			string(stork_api.ApplicationBackupStatusFailed),
-			err.Error())
+		return fmt.Errorf("error getting backup location: %v", err)
+	}
+	if backupLocation.Location.Type != stork_api.BackupLocationNFS {
+		err := a.createBackupLocationPath(backup)
+		if err != nil {
+			log.ApplicationBackupLog(backup).Errorf(err.Error())
+			a.recorder.Event(backup,
+				v1.EventTypeWarning,
+				string(stork_api.ApplicationBackupStatusFailed),
+				err.Error())
+		}
 	}
 
 	// Make sure the rules exist if configured
diff --git a/pkg/objectstore/nfs/nfs.go b/pkg/objectstore/nfs/nfs.go
new file mode 100644
index 0000000000..48bb44333f
--- /dev/null
+++ b/pkg/objectstore/nfs/nfs.go
@@ -0,0 +1,13 @@
+package nfs
+
+import (
+	stork_api "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1"
+	"github.com/libopenstorage/stork/pkg/objectstore/common"
+	"github.com/sirupsen/logrus"
+)
+
+// GetObjLockInfo returns an empty object lock configuration, since object lock is not supported for NFS
+func GetObjLockInfo(backupLocation *stork_api.BackupLocation) (*common.ObjLockInfo, error) {
+	logrus.Infof("object lock is not supported for nfs server")
+	return &common.ObjLockInfo{}, nil
+}
diff --git a/pkg/objectstore/objectstore.go b/pkg/objectstore/objectstore.go
index b591fac999..6134c6e31d 100644
--- a/pkg/objectstore/objectstore.go
+++ b/pkg/objectstore/objectstore.go
@@ -7,6 +7,7 @@ import (
 	"github.com/libopenstorage/stork/pkg/objectstore/azure"
 	"github.com/libopenstorage/stork/pkg/objectstore/common"
 	"github.com/libopenstorage/stork/pkg/objectstore/google"
+	"github.com/libopenstorage/stork/pkg/objectstore/nfs"
 	"github.com/libopenstorage/stork/pkg/objectstore/s3"
 	"gocloud.dev/blob"
 )
@@ -59,6 +60,8 @@ func GetObjLockInfo(backupLocation *stork_api.BackupLocation) (*common.ObjLockIn
 		return azure.GetObjLockInfo(backupLocation)
 	case stork_api.BackupLocationS3:
 		return s3.GetObjLockInfo(backupLocation)
+	case stork_api.BackupLocationNFS:
+		return nfs.GetObjLockInfo(backupLocation)
 	default:
 		return nil, fmt.Errorf("invalid backupLocation type: %v", backupLocation.Location.Type)
 	}
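A note on the object lock dispatch just added: objectstore.GetObjLockInfo fans out on the backup location type to a per-provider implementation, and the NFS stub always succeeds with an empty result. Below is a minimal, illustrative caller; only GetObjLockInfo and the stork BackupLocation type come from this patch, and the wrapper function itself is a hypothetical sketch.

    // checkObjLock is a hypothetical helper showing how a caller might
    // consume the dispatch above. For NFS locations the call always
    // succeeds and returns an empty ObjLockInfo.
    func checkObjLock(bl *stork_api.BackupLocation) error {
        objLockInfo, err := objectstore.GetObjLockInfo(bl)
        if err != nil {
            // Only unknown location types land here, via the default case.
            return fmt.Errorf("fetching object lock info for %v: %v", bl.Name, err)
        }
        logrus.Infof("object lock info for %v: %+v", bl.Name, objLockInfo)
        return nil
    }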

From 5267c6eef698bffb746b81686803f78a53b52767 Mon Sep 17 00:00:00 2001
From: Prashanth Kumar
Date: Mon, 12 Sep 2022 04:39:49 +0000
Subject: [PATCH 56/97] Initializing resource export controller

---
 cmd/stork/stork.go | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/cmd/stork/stork.go b/cmd/stork/stork.go
index fa41ad5b5b..58a06d13c0 100644
--- a/cmd/stork/stork.go
+++ b/cmd/stork/stork.go
@@ -42,6 +42,7 @@ import (
 	"github.com/libopenstorage/stork/pkg/webhookadmission"
 	kdmpapi "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1"
 	"github.com/portworx/kdmp/pkg/controllers/dataexport"
+	"github.com/portworx/kdmp/pkg/controllers/resourceexport"
 	"github.com/portworx/kdmp/pkg/drivers"
 	"github.com/portworx/kdmp/pkg/jobratelimit"
 	kdmpversion "github.com/portworx/kdmp/pkg/version"
@@ -529,6 +530,14 @@ func runStork(mgr manager.Manager, d volume.Driver, recorder record.EventRecorde
 		if err := dataexport.Init(mgr); err != nil {
 			log.Fatalf("Error initializing kdmp controller: %v", err)
 		}
+		resourceExportController, err := resourceexport.NewController(mgr)
+		if err != nil {
+			log.Fatalf("Error initializing resource export controller: %v", err)
+		}
+
+		if err := resourceExportController.Init(mgr); err != nil {
+			log.Fatalf("Error initializing resource export controller manager: %v", err)
+		}
 	}
 
 	ctx := context.Background()
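The hunk above follows the same construct-then-register sequence used for the dataexport controller a few lines earlier. (The local variable is renamed here from resourceexport to resourceExportController so it no longer shadows the imported package name.) Condensed into one place, the pattern looks like the sketch below; the wrapper function name is illustrative, and the fatal-exit error style matches the file's existing convention.

    // initKdmpControllers sketches the wiring used in runStork: construct
    // each kdmp controller, then register it with the controller-runtime
    // manager via Init.
    func initKdmpControllers(mgr manager.Manager) {
        if err := dataexport.Init(mgr); err != nil {
            log.Fatalf("Error initializing kdmp controller: %v", err)
        }
        reController, err := resourceexport.NewController(mgr)
        if err != nil {
            log.Fatalf("Error initializing resource export controller: %v", err)
        }
        if err := reController.Init(mgr); err != nil {
            log.Fatalf("Error initializing resource export controller manager: %v", err)
        }
    }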
kdmpAnnotationPrefix + "backupobject-name" + pxbackupObjectUIDKey = pxbackupAnnotationPrefix + "backup-uid" + pxbackupAnnotationPrefix = "portworx.io/" + pxbackupObjectNameKey = pxbackupAnnotationPrefix + "backup-name" + backupObjectUIDKey = kdmpAnnotationPrefix + "backupobject-uid" + skipResourceAnnotation = "stork.libopenstorage.org/skip-resource" ) var ( @@ -1154,11 +1170,55 @@ func (a *ApplicationBackupController) uploadMetadata( return a.uploadObject(backup, metadataObjectName, jsonBytes) } +func (a *ApplicationBackupController) isNFSBackuplocationType( + backup *stork_api.ApplicationBackup, +) (bool, error) { + backupLocation, err := storkops.Instance().GetBackupLocation(backup.Spec.BackupLocation, backup.Namespace) + if err != nil { + return false, fmt.Errorf("error getting backup location path: %v", err) + } + if backupLocation.Location.Type == stork_api.BackupLocationNFS { + return true, nil + } + return false, nil +} + +// getShortUID returns the first part of the UID +func getShortUID(uid string) string { + if len(uid) < 8 { + return "" + } + return uid[0:7] +} + +// getValidLabel - will validate the label to make sure the length is less 63 and contains valid label format. +// If the length is greater then 63, it will truncate to 63 character. +func getValidLabel(labelVal string) string { + if len(labelVal) > validation.LabelValueMaxLength { + labelVal = truncate.Truncate(labelVal, validation.LabelValueMaxLength, "", truncate.PositionEnd) + // make sure the truncated value does not end with the hyphen. + labelVal = strings.Trim(labelVal, "-") + // make sure the truncated value does not end with the dot. + labelVal = strings.Trim(labelVal, ".") + } + return labelVal +} + +func getResourceExportCRName(opsPrefix, crUID, ns string) string { + name := fmt.Sprintf("%s-%s-%s", opsPrefix, getShortUID(crUID), ns) + name = getValidLabel(name) + return name +} + func (a *ApplicationBackupController) backupResources( backup *stork_api.ApplicationBackup, ) error { var err error var resourceTypes []metav1.APIResource + nfs, err := a.isNFSBackuplocationType(backup) + if err != nil { + logrus.Errorf("error in checking backuplocation type") + } // Listing all resource types if len(backup.Spec.ResourceTypes) != 0 { optionalResourceTypes := []string{} @@ -1310,6 +1370,100 @@ func (a *ApplicationBackupController) backupResources( return err } + if nfs { + // Check whether ResourceExport is preset or not + crName := getResourceExportCRName(prefixBackup, string(backup.UID), backup.Namespace) + resourceExport, err := kdmpShedOps.Instance().GetResourceExport(crName, backup.Namespace) + if err != nil { + if k8s_errors.IsNotFound(err) { + // create resource export CR + resourceExport := &kdmpapi.ResourceExport{} + // Adding required label for debugging + labels := make(map[string]string) + labels[applicationBackupCRNameKey] = getValidLabel(backup.Name) + labels[applicationBackupCRUIDKey] = getValidLabel(getShortUID(string(backup.UID))) + // If backup from px-backup, update the backup object details in the label + if val, ok := backup.Annotations[pxbackupAnnotationCreateByKey]; ok { + if val == pxbackupAnnotationCreateByValue { + labels[backupObjectNameKey] = getValidLabel(backup.Annotations[pxbackupObjectNameKey]) + labels[backupObjectUIDKey] = getValidLabel(backup.Annotations[pxbackupObjectUIDKey]) + } + } + resourceExport.Labels = labels + resourceExport.Annotations = make(map[string]string) + resourceExport.Annotations[skipResourceAnnotation] = "true" + resourceExport.Name = 
@@ -1310,6 +1370,100 @@ func (a *ApplicationBackupController) backupResources(
 		return err
 	}
 
+	if nfs {
+		// Check whether ResourceExport is present or not
+		crName := getResourceExportCRName(prefixBackup, string(backup.UID), backup.Namespace)
+		resourceExport, err := kdmpShedOps.Instance().GetResourceExport(crName, backup.Namespace)
+		if err != nil {
+			if k8s_errors.IsNotFound(err) {
+				// create resource export CR
+				resourceExport := &kdmpapi.ResourceExport{}
+				// Adding required label for debugging
+				labels := make(map[string]string)
+				labels[applicationBackupCRNameKey] = getValidLabel(backup.Name)
+				labels[applicationBackupCRUIDKey] = getValidLabel(getShortUID(string(backup.UID)))
+				// If backup from px-backup, update the backup object details in the label
+				if val, ok := backup.Annotations[pxbackupAnnotationCreateByKey]; ok {
+					if val == pxbackupAnnotationCreateByValue {
+						labels[backupObjectNameKey] = getValidLabel(backup.Annotations[pxbackupObjectNameKey])
+						labels[backupObjectUIDKey] = getValidLabel(backup.Annotations[pxbackupObjectUIDKey])
+					}
+				}
+				resourceExport.Labels = labels
+				resourceExport.Annotations = make(map[string]string)
+				resourceExport.Annotations[skipResourceAnnotation] = "true"
+				resourceExport.Name = getResourceExportCRName(prefixBackup, string(backup.UID), backup.Namespace)
+				resourceExport.Namespace = backup.Namespace
+				source := &kdmpapi.ResourceExportObjectReference{
+					APIVersion: backup.APIVersion,
+					Kind:       backup.Kind,
+					Namespace:  backup.Namespace,
+					Name:       backup.Name,
+				}
+				backupLocation, err := storkops.Instance().GetBackupLocation(backup.Spec.BackupLocation, backup.Namespace)
+				if err != nil {
+					return fmt.Errorf("error getting backup location: %v", err)
+				}
+				destination := &kdmpapi.ResourceExportObjectReference{
+					// TODO: .GetBackupLocation is not returning APIVersion and kind.
+					// Hardcoding for now.
+					// APIVersion: backupLocation.APIVersion,
+					// Kind:       backupLocation.Kind,
+					APIVersion: "stork.libopenstorage.org/v1alpha1",
+					Kind:       "BackupLocation",
+					Namespace:  backupLocation.Namespace,
+					Name:       backupLocation.Name,
+				}
+				resourceExport.Spec.Source = *source
+				resourceExport.Spec.Destination = *destination
+				_, err = kdmpShedOps.Instance().CreateResourceExport(resourceExport)
+				if err != nil {
+					logrus.Errorf("failed to create ResourceExport CR: %v", err)
+					return err
+				}
+				return nil
+			}
+			logrus.Errorf("failed to get backup resourceExport CR: %v", err)
+			// Will retry in the next cycle of reconciler.
+			return nil
+		} else {
+			var message string
+			// Check the status of the resourceExport CR and update the applicationBackup CR accordingly
+			switch resourceExport.Status {
+			case kdmpapi.ResourceExportStatusFailed:
+				message = fmt.Sprintf("Error uploading resources: %v", err)
+				backup.Status.Status = stork_api.ApplicationBackupStatusFailed
+				backup.Status.Stage = stork_api.ApplicationBackupStageFinal
+				backup.Status.Reason = message
+				backup.Status.LastUpdateTimestamp = metav1.Now()
+				err = a.client.Update(context.TODO(), backup)
+				if err != nil {
+					return err
+				}
+				a.recorder.Event(backup,
+					v1.EventTypeWarning,
+					string(stork_api.ApplicationBackupStatusFailed),
+					message)
+				log.ApplicationBackupLog(backup).Errorf(message)
+				return err
+			case kdmpapi.ResourceExportStatusSuccessful:
+				backup.Status.BackupPath = GetObjectPath(backup)
+				backup.Status.Stage = stork_api.ApplicationBackupStageFinal
+				backup.Status.FinishTimestamp = metav1.Now()
+				backup.Status.Status = stork_api.ApplicationBackupStatusSuccessful
+				backup.Status.Reason = "Volumes and resources were backed up successfully"
+			case kdmpapi.ResourceExportStatusInitial:
+			case kdmpapi.ResourceExportStatusPending:
+			case kdmpapi.ResourceExportStatusInProgress:
+				backup.Status.LastUpdateTimestamp = metav1.Now()
+			}
+			err = a.client.Update(context.TODO(), backup)
+			if err != nil {
+				return err
+			}
+			return nil
+		}
+	}
 	// Upload the resources to the backup location
 	if err = a.uploadResources(backup, allObjects); err != nil {
 		message := fmt.Sprintf("Error uploading resources: %v", err)
@@ -1400,6 +1554,11 @@ func (a *ApplicationBackupController) deleteBackup(backup *stork_api.Application
 		}
 		return true, err
 	}
+	// TODO: for nfs type, we need to invoke job based deletion.
+	// For now, skipping it.
+	if backupLocation.Location.Type == stork_api.BackupLocationNFS {
+		return true, nil
+	}
 	bucket, err := objectstore.GetBucket(backupLocation)
 	if err != nil {
 		return true, err
 	}
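The branch above implements a common reconcile idiom: look up the CR, create it when absent and return, and otherwise act on its observed status. Stripped of the labels and status handling, the skeleton is sketched below; newResourceExport and handleResourceExportStatus are hypothetical helpers standing in for the inline code above.

    // Sketch: get-or-create for the ResourceExport CR that drives NFS backups.
    crName := getResourceExportCRName(prefixBackup, string(backup.UID), backup.Namespace)
    re, err := kdmpShedOps.Instance().GetResourceExport(crName, backup.Namespace)
    if k8s_errors.IsNotFound(err) {
        // First reconcile pass: create the CR and return; a later pass
        // observes its status.
        _, err = kdmpShedOps.Instance().CreateResourceExport(newResourceExport(backup))
        return err
    } else if err != nil {
        return nil // transient error; retried on the next reconcile cycle
    }
    handleResourceExportStatus(backup, re) // map CR status onto the backup CR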
@@ -1504,6 +1663,15 @@ func (a *ApplicationBackupController) cleanupResources(
 			logrus.Errorf("unable to cleanup post backup resources, err: %v", err)
 		}
 	}
+	// Directly calling DeleteResourceExport without checking the backuplocation type.
+	// For other backuplocation types, a NotFound error is expected.
+	crName := getResourceExportCRName(prefixBackup, string(backup.UID), backup.Namespace)
+	err := kdmpShedOps.Instance().DeleteResourceExport(crName, backup.Namespace)
+	if err != nil && !k8s_errors.IsNotFound(err) {
+		errMsg := fmt.Sprintf("failed to delete resource export CR [%v]: %v", crName, err)
+		log.ApplicationBackupLog(backup).Errorf("%v", errMsg)
+		return err
+	}
 	return nil
 }

From 70a43d8f78f642dead8f148f5811d91107e54725 Mon Sep 17 00:00:00 2001
From: Prashanth Kumar
Date: Wed, 14 Sep 2022 09:11:20 +0000
Subject: [PATCH 58/97] pb-3093: Setting nfs as type while creating
 ResourceExport CR

---
 pkg/applicationmanager/controllers/applicationbackup.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/pkg/applicationmanager/controllers/applicationbackup.go b/pkg/applicationmanager/controllers/applicationbackup.go
index 8a8e6a14ea..207b8cf029 100644
--- a/pkg/applicationmanager/controllers/applicationbackup.go
+++ b/pkg/applicationmanager/controllers/applicationbackup.go
@@ -1394,6 +1394,7 @@ func (a *ApplicationBackupController) backupResources(
 				resourceExport.Annotations[skipResourceAnnotation] = "true"
 				resourceExport.Name = getResourceExportCRName(prefixBackup, string(backup.UID), backup.Namespace)
 				resourceExport.Namespace = backup.Namespace
+				resourceExport.Spec.Type = kdmpapi.ResourceExportBackup
 				source := &kdmpapi.ResourceExportObjectReference{
 					APIVersion: backup.APIVersion,
 					Kind:       backup.Kind,
@@ -1429,7 +1430,7 @@ func (a *ApplicationBackupController) backupResources(
 		} else {
 			var message string
 			// Check the status of the resourceExport CR and update the applicationBackup CR accordingly
-			switch resourceExport.Status {
+			switch resourceExport.Status.Status {
 			case kdmpapi.ResourceExportStatusFailed:
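Patch 58 corrects two easy-to-miss details from patch 57: ResourceExport.Status is a struct whose phase lives in Status.Status, and Spec.Type must be set so the kdmp controller knows this export is a backup operation. A two-line sketch of the corrected shape, with the field layout inferred from this diff rather than from kdmp source:

    // Field layout as this patch uses it (inferred from the diff):
    resourceExport.Spec.Type = kdmpapi.ResourceExportBackup
    switch resourceExport.Status.Status {
    case kdmpapi.ResourceExportStatusSuccessful:
        // terminal success handling
    }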

From 38e6416d1dcef628d10b8990688b71bbbd2e98a0 Mon Sep 17 00:00:00 2001
From: diptiranjan
Date: Tue, 20 Sep 2022 17:06:12 +0530
Subject: [PATCH 59/97] PB-3111: Support for nfs backuplocation when restoring
 resources in the application restore controller.

---
 .../controllers/applicationbackup.go  |   9 +-
 .../controllers/applicationrestore.go | 168 ++++++++++++++++--
 2 files changed, 159 insertions(+), 18 deletions(-)

diff --git a/pkg/applicationmanager/controllers/applicationbackup.go b/pkg/applicationmanager/controllers/applicationbackup.go
index 207b8cf029..6c06f8b474 100644
--- a/pkg/applicationmanager/controllers/applicationbackup.go
+++ b/pkg/applicationmanager/controllers/applicationbackup.go
@@ -75,16 +75,21 @@ const (
 	nonKdmpDriverOnly = "nonkdmp"
 	mixedDriver       = "mixed"
 	prefixBackup      = "backup"
+	prefixRestore     = "restore"
 	applicationBackupCRNameKey      = kdmpAnnotationPrefix + "applicationbackup-cr-name"
+	applicationRestoreCRNameKey     = kdmpAnnotationPrefix + "applicationrestore-cr-name"
 	applicationBackupCRUIDKey       = kdmpAnnotationPrefix + "applicationbackup-cr-uid"
+	applicationRestoreCRUIDKey      = kdmpAnnotationPrefix + "applicationrestore-cr-uid"
 	kdmpAnnotationPrefix            = "kdmp.portworx.com/"
 	pxbackupAnnotationCreateByKey   = pxbackupAnnotationPrefix + "created-by"
 	pxbackupAnnotationCreateByValue = "px-backup"
 	backupObjectNameKey             = kdmpAnnotationPrefix + "backupobject-name"
+	restoreObjectNameKey            = kdmpAnnotationPrefix + "restoreobject-name"
 	pxbackupObjectUIDKey            = pxbackupAnnotationPrefix + "backup-uid"
 	pxbackupAnnotationPrefix        = "portworx.io/"
 	pxbackupObjectNameKey           = pxbackupAnnotationPrefix + "backup-name"
 	backupObjectUIDKey              = kdmpAnnotationPrefix + "backupobject-uid"
+	restoreObjectUIDKey             = kdmpAnnotationPrefix + "restoreobject-uid"
 	skipResourceAnnotation          = "stork.libopenstorage.org/skip-resource"
 )
@@ -1170,7 +1175,7 @@ func (a *ApplicationBackupController) uploadMetadata(
 	return a.uploadObject(backup, metadataObjectName, jsonBytes)
 }
 
-func (a *ApplicationBackupController) isNFSBackuplocationType(
+func IsNFSBackuplocationType(
 	backup *stork_api.ApplicationBackup,
 ) (bool, error) {
 	backupLocation, err := storkops.Instance().GetBackupLocation(backup.Spec.BackupLocation, backup.Namespace)
@@ -1215,7 +1220,7 @@ func (a *ApplicationBackupController) backupResources(
 ) error {
 	var err error
 	var resourceTypes []metav1.APIResource
-	nfs, err := a.isNFSBackuplocationType(backup)
+	nfs, err := IsNFSBackuplocationType(backup)
 	if err != nil {
 		logrus.Errorf("error checking backup location type: %v", err)
 	}
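Exporting the helper as IsNFSBackuplocationType lets the restore controller (next diff) reuse the same check instead of duplicating it. The call-site pattern shared by both controllers is sketched below; the comments stand in for the controller-specific bodies.

    // Both controllers branch the same way on the backup location type.
    nfs, err := IsNFSBackuplocationType(backup)
    if err != nil {
        logrus.Errorf("error checking backup location type: %v", err)
    }
    if nfs {
        // hand resource upload/apply off to a kdmp ResourceExport CR
    } else {
        // legacy path: upload or download resources directly
    }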
diff --git a/pkg/applicationmanager/controllers/applicationrestore.go b/pkg/applicationmanager/controllers/applicationrestore.go
index 66d16c2086..aaf355175e 100644
--- a/pkg/applicationmanager/controllers/applicationrestore.go
+++ b/pkg/applicationmanager/controllers/applicationrestore.go
@@ -19,15 +19,17 @@ import (
 	"github.com/libopenstorage/stork/pkg/objectstore"
 	"github.com/libopenstorage/stork/pkg/resourcecollector"
 	"github.com/libopenstorage/stork/pkg/version"
+	kdmpapi "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1"
 	"github.com/portworx/sched-ops/k8s/apiextensions"
 	"github.com/portworx/sched-ops/k8s/core"
+	kdmpShedOps "github.com/portworx/sched-ops/k8s/kdmp"
 	storkops "github.com/portworx/sched-ops/k8s/stork"
 	"github.com/sirupsen/logrus"
 	v1 "k8s.io/api/core/v1"
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
 	apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
-	"k8s.io/apimachinery/pkg/api/errors"
+	k8s_errors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -149,7 +151,7 @@ func (a *ApplicationRestoreController) createNamespaces(backup *storkapi.Applica
 		})
 		log.ApplicationRestoreLog(restore).Tracef("Creating dest namespace %v", ns.Name)
 		if err != nil {
-			if errors.IsAlreadyExists(err) {
+			if k8s_errors.IsAlreadyExists(err) {
 				oldNS, err := core.Instance().GetNamespace(ns.GetName())
 				if err != nil {
 					return err
@@ -201,7 +203,7 @@ func (a *ApplicationRestoreController) createNamespaces(backup *storkapi.Applica
 	}
 	for _, namespace := range restore.Spec.NamespaceMapping {
 		if ns, err := core.Instance().GetNamespace(namespace); err != nil {
-			if errors.IsNotFound(err) {
+			if k8s_errors.IsNotFound(err) {
 				if _, err := core.Instance().CreateNamespace(&v1.Namespace{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: ns.Name,
@@ -226,7 +228,7 @@ func (a *ApplicationRestoreController) Reconcile(ctx context.Context, request re
 	restore := &storkapi.ApplicationRestore{}
 	err := a.client.Get(context.TODO(), request.NamespacedName, restore)
 	if err != nil {
-		if errors.IsNotFound(err) {
+		if k8s_errors.IsNotFound(err) {
 			// Request object not found, could have been deleted after reconcile request.
 			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
 			// Return and don't requeue
@@ -796,7 +798,7 @@ func (a *ApplicationRestoreController) downloadCRD(
 	for _, crd := range crds {
 		crd.ResourceVersion = ""
 		regCrd[crd.GetName()] = false
-		if _, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}); err != nil && !errors.IsAlreadyExists(err) {
+		if _, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}); err != nil && !k8s_errors.IsAlreadyExists(err) {
 			regCrd[crd.GetName()] = true
 			logrus.Warnf("error registering crds v1beta1 %v,%v", crd.GetName(), err)
 			continue
@@ -813,7 +815,7 @@ func (a *ApplicationRestoreController) downloadCRD(
 		var updatedVersions []apiextensionsv1.CustomResourceDefinitionVersion
 		// try to apply as v1 crd
 		var err error
-		if _, err = client.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}); err == nil || errors.IsAlreadyExists(err) {
+		if _, err = client.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}); err == nil || k8s_errors.IsAlreadyExists(err) {
 			logrus.Infof("registered v1 crds %v,", crd.GetName())
 			continue
 		}
@@ -833,7 +835,7 @@ func (a *ApplicationRestoreController) downloadCRD(
 		}
 		crd.Spec.Versions = updatedVersions
 
-		if _, err := client.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}); err != nil && !errors.IsAlreadyExists(err) {
+		if _, err := client.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}); err != nil && !k8s_errors.IsAlreadyExists(err) {
 			logrus.Warnf("error registering crdsv1 %v,%v", crd.GetName(), err)
 			continue
 		}
@@ -901,6 +903,28 @@ func (a *ApplicationRestoreController) updateResourceStatus(
 	return nil
 }
 
+func (a *ApplicationRestoreController) updateResourceStatusFromRestoreCR(
+	restore *storkapi.ApplicationRestore,
+	resource *kdmpapi.ResourceRestoreResourceInfo,
+	status storkapi.ApplicationRestoreStatusType,
+	reason string,
+) {
+	updatedResource := &storkapi.ApplicationRestoreResourceInfo{
+		ObjectInfo: storkapi.ObjectInfo{
+			Name:      resource.Name,
+			Namespace: resource.Namespace,
+			GroupVersionKind: metav1.GroupVersionKind{
+				Group:   resource.Group,
+				Version: resource.Version,
+				Kind:    resource.Kind,
+			},
+		},
+		Status: status,
+		Reason: reason,
+	}
+	restore.Status.Resources = append(restore.Status.Resources, updatedResource)
+}
+
 func (a *ApplicationRestoreController) getPVNameMappings(
 	restore *storkapi.ApplicationRestore,
 	objects []runtime.Unstructured,
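The downloadCRD context lines above show a create-with-fallback idiom worth calling out: the controller tries the v1 apiextensions API first and only then massages the versions and falls back. A compact, illustrative reduction of that idiom, not the controller's exact code:

    // Sketch: register a CRD as v1, tolerating "already exists".
    _, err := client.ApiextensionsV1().CustomResourceDefinitions().Create(
        context.TODO(), crd, metav1.CreateOptions{})
    if err == nil || k8s_errors.IsAlreadyExists(err) {
        return nil // registered, or already present
    }
    // Otherwise rebuild crd.Spec.Versions from the v1beta1 definition and
    // retry, as downloadCRD does above.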
@@ -1022,7 +1044,7 @@ func (a *ApplicationRestoreController) skipVolumesFromRestoreList(
 			ns := val
 			pvc, err := core.Instance().GetPersistentVolumeClaim(pvcObject.Name, ns)
 			if err != nil {
-				if errors.IsNotFound(err) {
+				if k8s_errors.IsNotFound(err) {
 					newVolInfos = append(newVolInfos, bkupVolInfo)
 					continue
 				}
@@ -1188,7 +1210,7 @@ func (a *ApplicationRestoreController) applyResources(
 		err = a.resourceCollector.ApplyResource(
 			a.dynamicInterface,
 			o)
-		if err != nil && errors.IsAlreadyExists(err) {
+		if err != nil && k8s_errors.IsAlreadyExists(err) {
 			switch restore.Spec.ReplacePolicy {
 			case storkapi.ApplicationRestoreReplacePolicyDelete:
 				log.ApplicationRestoreLog(restore).Errorf("Error deleting %v %v during restore: %v", objectType.GetKind(), metadata.GetName(), err)
@@ -1237,14 +1259,119 @@ func (a *ApplicationRestoreController) restoreResources(
 		return err
 	}
 
-	objects, err := a.downloadResources(backup, restore.Spec.BackupLocation, restore.Namespace)
+	nfs, err := IsNFSBackuplocationType(backup)
 	if err != nil {
-		log.ApplicationRestoreLog(restore).Errorf("Error downloading resources: %v", err)
-		return err
+		logrus.Errorf("error checking backup location type: %v", err)
 	}
 
-	if err := a.applyResources(restore, objects); err != nil {
-		return err
+	if !nfs {
+		objects, err := a.downloadResources(backup, restore.Spec.BackupLocation, restore.Namespace)
+		if err != nil {
+			log.ApplicationRestoreLog(restore).Errorf("Error downloading resources: %v", err)
+			return err
+		}
+
+		if err := a.applyResources(restore, objects); err != nil {
+			return err
+		}
+	} else {
+		// Check whether ResourceExport is present or not
+		crName := getResourceExportCRName(prefixRestore, string(restore.UID), restore.Namespace)
+		resourceExport, err := kdmpShedOps.Instance().GetResourceExport(crName, restore.Namespace)
+		if err != nil {
+			if k8s_errors.IsNotFound(err) {
+				// create resource export CR
+				resourceExport := &kdmpapi.ResourceExport{}
+				// Adding required label for debugging
+				labels := make(map[string]string)
+				labels[applicationRestoreCRNameKey] = getValidLabel(restore.Name)
+				labels[applicationRestoreCRUIDKey] = getValidLabel(getShortUID(string(restore.UID)))
+				// If restore from px-backup, update the restore object details in the label
+				if val, ok := backup.Annotations[pxbackupAnnotationCreateByKey]; ok {
+					if val == pxbackupAnnotationCreateByValue {
+						labels[restoreObjectNameKey] = getValidLabel(backup.Annotations[pxbackupObjectNameKey])
+						labels[restoreObjectUIDKey] = getValidLabel(backup.Annotations[pxbackupObjectUIDKey])
+					}
+				}
+				resourceExport.Labels = labels
+				resourceExport.Annotations = make(map[string]string)
+				resourceExport.Annotations[skipResourceAnnotation] = "true"
+				resourceExport.Name = crName
+				resourceExport.Namespace = restore.Namespace
+				resourceExport.Spec.Type = kdmpapi.ResourceExportBackup
+				source := &kdmpapi.ResourceExportObjectReference{
+					APIVersion: restore.APIVersion,
+					Kind:       restore.Kind,
+					Namespace:  restore.Namespace,
+					Name:       restore.Name,
+				}
+				backupLocation, err := storkops.Instance().GetBackupLocation(backup.Spec.BackupLocation, backup.Namespace)
+				if err != nil {
+					return fmt.Errorf("error getting backup location: %v", err)
+				}
+				destination := &kdmpapi.ResourceExportObjectReference{
+					// TODO: .GetBackupLocation is not returning APIVersion and kind.
+					// Hardcoding for now.
+					// APIVersion: backupLocation.APIVersion,
+					// Kind:       backupLocation.Kind,
+					APIVersion: "stork.libopenstorage.org/v1alpha1",
+					Kind:       "BackupLocation",
+					Namespace:  backupLocation.Namespace,
+					Name:       backupLocation.Name,
+				}
+				resourceExport.Spec.Source = *source
+				resourceExport.Spec.Destination = *destination
+				_, err = kdmpShedOps.Instance().CreateResourceExport(resourceExport)
+				if err != nil {
+					logrus.Errorf("failed to create ResourceExport CR: %v", err)
+					return err
+				}
+				return nil
+			}
+			logrus.Errorf("failed to get restore resourceExport CR: %v", err)
+			// Will retry in the next cycle of reconciler.
+			return nil
+		} else {
+			var message string
+			// Check the status of the resourceExport CR and update the applicationRestore CR accordingly
+			switch resourceExport.Status.Status {
+			case kdmpapi.ResourceExportStatusFailed:
+				message = fmt.Sprintf("Error applying resources: %v", err)
+				restore.Status.Status = storkapi.ApplicationRestoreStatusFailed
+				restore.Status.Stage = storkapi.ApplicationRestoreStageFinal
+				restore.Status.Reason = message
+				restore.Status.LastUpdateTimestamp = metav1.Now()
+				err = a.client.Update(context.TODO(), restore)
+				if err != nil {
+					return err
+				}
+				a.recorder.Event(restore,
+					v1.EventTypeWarning,
+					string(storkapi.ApplicationRestoreStatusFailed),
+					message)
+				log.ApplicationRestoreLog(restore).Errorf(message)
+				return err
+			case kdmpapi.ResourceExportStatusSuccessful:
+				// TODO: modify to have subresource-level updating
+				for _, resource := range resourceExport.Spec.Resources {
+					a.updateResourceStatusFromRestoreCR(
+						restore,
+						resource,
+						storkapi.ApplicationRestoreStatusType(resource.Status),
+						resource.Reason)
+				}
+
+			case kdmpapi.ResourceExportStatusInitial:
+			case kdmpapi.ResourceExportStatusPending:
+			case kdmpapi.ResourceExportStatusInProgress:
+				restore.Status.LastUpdateTimestamp = metav1.Now()
+			}
+			err = a.client.Update(context.TODO(), restore)
+			if err != nil {
+				return err
+			}
+			return nil
+		}
 	}
 	// Before updating to final stage, cleanup generic backup CRs, if any.
 	err = a.cleanupResources(restore)
@@ -1371,13 +1498,13 @@ func (a *ApplicationRestoreController) createCRD() error {
 	}
 	if ok {
 		err := k8sutils.CreateCRD(resource)
-		if err != nil && !errors.IsAlreadyExists(err) {
+		if err != nil && !k8s_errors.IsAlreadyExists(err) {
 			return err
 		}
 		return apiextensions.Instance().ValidateCRD(resource.Plural+"."+resource.Group, validateCRDTimeout, validateCRDInterval)
 	}
 	err = apiextensions.Instance().CreateCRDV1beta1(resource)
-	if err != nil && !errors.IsAlreadyExists(err) {
+	if err != nil && !k8s_errors.IsAlreadyExists(err) {
 		return err
 	}
 	return apiextensions.Instance().ValidateCRDV1beta1(resource, validateCRDTimeout, validateCRDInterval)
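One Go detail worth calling out in the status switches above (here and in the backup controller): unlike C, Go switch cases never fall through, so a sequence of empty case arms does nothing at all. As written, only the InProgress arm refreshes LastUpdateTimestamp; Initial and Pending are deliberate no-ops. If the intent were to treat all three in-flight phases the same, the cases would need to be grouped:

    // Empty cases do NOT fall through in Go:
    switch status {
    case Initial:    // no-op
    case Pending:    // no-op
    case InProgress:
        markInProgress() // only InProgress reaches here
    }
    // Grouped form, which would treat all three identically:
    switch status {
    case Initial, Pending, InProgress:
        markInProgress()
    }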
@@ -1395,5 +1522,14 @@ func (a *ApplicationRestoreController) cleanupResources(restore *storkapi.Applic
 			logrus.Errorf("unable to cleanup post restore resources, err: %v", err)
 		}
 	}
+	// Directly calling DeleteResourceExport without checking the backuplocation type.
+	// For other backuplocation types, a NotFound error is expected.
+	crName := getResourceExportCRName(prefixRestore, string(restore.UID), restore.Namespace)
+	err := kdmpShedOps.Instance().DeleteResourceExport(crName, restore.Namespace)
+	if err != nil && !k8s_errors.IsNotFound(err) {
+		errMsg := fmt.Sprintf("failed to delete resource export CR [%v]: %v", crName, err)
+		log.ApplicationRestoreLog(restore).Errorf("%v", errMsg)
+		return err
+	}
 	return nil
 }
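A subtle point in the cleanup path above: the restore's ResourceExport CR is created under the restore prefix, so cleanup must reconstruct the name the same way or the delete silently becomes a NotFound no-op. (The hunk as originally written looked the CR up under the backup prefix; the corrected form keeps the naming symmetric.)

    // Create (in restoreResources):
    crName := getResourceExportCRName(prefixRestore, string(restore.UID), restore.Namespace)
    // Cleanup must rebuild the identical name:
    crName = getResourceExportCRName(prefixRestore, string(restore.UID), restore.Namespace)
    _ = kdmpShedOps.Instance().DeleteResourceExport(crName, restore.Namespace)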

From f477b06d5505211b1bb1e31a168f3fb0e0b303c1 Mon Sep 17 00:00:00 2001
From: sivakumar subramani
Date: Sat, 24 Sep 2022 20:11:48 +0530
Subject: [PATCH 60/97] pb-3109: moved the const definition and api between
 kdmp driver and applicationbackup controller code to utils pkg

---
 drivers/volume/kdmp/kdmp.go           | 143 +++++++-----------
 .../controllers/applicationbackup.go  |  65 ++------
 .../controllers/applicationrestore.go |  19 +--
 pkg/utils/utils.go                    |  61 ++++++++
 4 files changed, 137 insertions(+), 151 deletions(-)

diff --git a/drivers/volume/kdmp/kdmp.go b/drivers/volume/kdmp/kdmp.go
index cabce3d341..fec46b2036 100644
--- a/drivers/volume/kdmp/kdmp.go
+++ b/drivers/volume/kdmp/kdmp.go
@@ -2,12 +2,12 @@ package kdmp
 
 import (
 	"fmt"
+	"github.com/libopenstorage/stork/pkg/utils"
 	"os"
 	"reflect"
 	"strings"
 	"time"
 
-	"github.com/aquilax/truncate"
 	snapv1 "github.com/kubernetes-incubator/external-storage/snapshot/pkg/apis/crd/v1"
 	snapshotVolume "github.com/kubernetes-incubator/external-storage/snapshot/pkg/volume"
 	stork_driver "github.com/libopenstorage/stork/drivers"
@@ -31,20 +31,17 @@ import (
 	k8serror "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/validation"
 	"k8s.io/apimachinery/pkg/util/wait"
 	k8shelper "k8s.io/component-helpers/storage/volume"
 )
 
 const (
-	prefixRepo             = "generic-backup"
-	prefixRestore          = "restore"
-	prefixBackup           = "backup"
-	prefixDelete           = "delete"
-	skipResourceAnnotation = "stork.libopenstorage.org/skip-resource"
-	volumeinitialDelay     = 2 * time.Second
-	volumeFactor           = 1.5
-	volumeSteps            = 20
+	prefixRepo         = "generic-backup"
+	prefixRestore      = "restore"
+	prefixDelete       = "delete"
+	volumeinitialDelay = 2 * time.Second
+	volumeFactor       = 1.5
+	volumeSteps        = 20
 	// StorkAPIVersion current api version supported by stork
 	StorkAPIVersion = "stork.libopenstorage.org/v1alpha1"
 	// KdmpAPIVersion current api version supported by KDMP
@@ -60,32 +57,19 @@ const (
 	// StorkAnnotation for pvcs created by stork-kdmp driver
 	StorkAnnotation = "stork.libopenstorage.org/kdmp"
 	// backupUID annotation key
-	backupUIDKey = "portworx.io/backup-uid"
-
-	pxbackupAnnotationPrefix        = "portworx.io/"
-	pxbackupAnnotationCreateByKey   = pxbackupAnnotationPrefix + "created-by"
-	pxbackupAnnotationCreateByValue = "px-backup"
-	pxbackupObjectUIDKey            = pxbackupAnnotationPrefix + "backup-uid"
-	pxbackupObjectNameKey           = pxbackupAnnotationPrefix + "backup-name"
-	pxRestoreObjectUIDKey           = pxbackupAnnotationPrefix + "restore-uid"
-	pxRestoreObjectNameKey          = pxbackupAnnotationPrefix + "restore-name"
+	backupUIDKey           = "portworx.io/backup-uid"
+	pxRestoreObjectUIDKey  = utils.PxbackupAnnotationPrefix + "restore-uid"
+	pxRestoreObjectNameKey = utils.PxbackupAnnotationPrefix + "restore-name"
 
 	//kdmp related labels
-	kdmpAnnotationPrefix = "kdmp.portworx.com/"
-	// backup related Labels
-	applicationBackupCRNameKey = kdmpAnnotationPrefix + "applicationbackup-cr-name"
-	applicationBackupCRUIDKey  = kdmpAnnotationPrefix + "applicationbackup-cr-uid"
-	backupObjectNameKey        = kdmpAnnotationPrefix + "backupobject-name"
-	backupObjectUIDKey         = kdmpAnnotationPrefix + "backupobject-uid"
 	// restore related Labels
-	applicationRestoreCRNameKey = kdmpAnnotationPrefix + "applicationrestore-cr-name"
-	applicationRestoreCRUIDKey  = kdmpAnnotationPrefix + "applicationrestore-cr-uid"
-	restoreObjectNameKey        = kdmpAnnotationPrefix + "restoreobject-name"
-	restoreObjectUIDKey         = kdmpAnnotationPrefix + "restoreobject-uid"
+	applicationRestoreCRNameKey = utils.KdmpAnnotationPrefix + "applicationrestore-cr-name"
+	applicationRestoreCRUIDKey  = utils.KdmpAnnotationPrefix + "applicationrestore-cr-uid"
+	restoreObjectNameKey        = utils.KdmpAnnotationPrefix + "restoreobject-name"
+	restoreObjectUIDKey         = utils.KdmpAnnotationPrefix + "restoreobject-uid"
 
-	pvcNameKey = kdmpAnnotationPrefix + "pvc-name"
-	pvcUIDKey  = kdmpAnnotationPrefix + "pvc-uid"
+	pvcNameKey = utils.KdmpAnnotationPrefix + "pvc-name"
+	pvcUIDKey  = utils.KdmpAnnotationPrefix + "pvc-uid"
 	// pvcProvisionerAnnotation is the annotation on PVC which has the
 	// provisioner name
 	pvcProvisionerAnnotation = "volume.beta.kubernetes.io/storage-provisioner"
@@ -153,8 +137,8 @@ func (k *kdmp) OwnsPV(pv *v1.PersistentVolume) bool {
 }
 
 func getGenericCRName(opsPrefix, crUID, pvcUID, ns string) string {
-	name := fmt.Sprintf("%s-%s-%s-%s", opsPrefix, getShortUID(crUID), getShortUID(pvcUID), ns)
-	name = getValidLabel(name)
+	name := fmt.Sprintf("%s-%s-%s-%s", opsPrefix, utils.GetShortUID(crUID), utils.GetShortUID(pvcUID), ns)
+	name = utils.GetValidLabel(name)
 	return name
 }
 
@@ -296,15 +280,15 @@ func (k *kdmp) StartBackup(backup *storkapi.ApplicationBackup,
 	dataExport := &kdmpapi.DataExport{}
 	// Adding required label for debugging
 	labels := make(map[string]string)
-	labels[applicationBackupCRNameKey] = getValidLabel(backup.Name)
-	labels[applicationBackupCRUIDKey] = getValidLabel(getShortUID(string(backup.UID)))
-	labels[pvcNameKey] = getValidLabel(pvc.Name)
-	labels[pvcUIDKey] = getValidLabel(getShortUID(string(pvc.UID)))
+	labels[utils.ApplicationBackupCRNameKey] = utils.GetValidLabel(backup.Name)
+	labels[utils.ApplicationBackupCRUIDKey] = utils.GetValidLabel(utils.GetShortUID(string(backup.UID)))
+	labels[pvcNameKey] = utils.GetValidLabel(pvc.Name)
+	labels[pvcUIDKey] = utils.GetValidLabel(utils.GetShortUID(string(pvc.UID)))
 	// If backup from px-backup, update the backup object details in the label
-	if val, ok := backup.Annotations[pxbackupAnnotationCreateByKey]; ok {
-		if val == pxbackupAnnotationCreateByValue {
-			labels[backupObjectNameKey] = getValidLabel(backup.Annotations[pxbackupObjectNameKey])
-			labels[backupObjectUIDKey] = getValidLabel(backup.Annotations[pxbackupObjectUIDKey])
+	if val, ok := backup.Annotations[utils.PxbackupAnnotationCreateByKey]; ok {
+		if val == utils.PxbackupAnnotationCreateByValue {
+			labels[utils.BackupObjectNameKey] = utils.GetValidLabel(backup.Annotations[utils.PxbackupObjectNameKey])
+			labels[utils.BackupObjectUIDKey] = utils.GetValidLabel(backup.Annotations[utils.PxbackupObjectUIDKey])
 		}
 	}
 	dataExport.Spec.TriggeredFromNs = storkPodNs
 	dataExport.Annotations = make(map[string]string)
-	dataExport.Annotations[skipResourceAnnotation] = "true"
-	dataExport.Annotations[backupObjectUIDKey] = string(backup.Annotations[pxbackupObjectUIDKey])
+	dataExport.Annotations[utils.SkipResourceAnnotation] = "true"
+	dataExport.Annotations[utils.BackupObjectUIDKey] = string(backup.Annotations[utils.PxbackupObjectUIDKey])
 	dataExport.Annotations[pvcUIDKey] = string(pvc.UID)
-	dataExport.Name = getGenericCRName(prefixBackup, string(backup.UID), string(pvc.UID), pvc.Namespace)
+	dataExport.Name = getGenericCRName(utils.PrefixBackup, string(backup.UID), string(pvc.UID), pvc.Namespace)
 	dataExport.Namespace = pvc.Namespace
 	dataExport.Spec.Type = kdmpapi.DataExportKopia
 	dataExport.Spec.Destination = kdmpapi.DataExportObjectReference{
@@ -356,7 +340,7 @@ func (k *kdmp) GetBackupStatus(backup *storkapi.ApplicationBackup) ([]*storkapi.
 		if vInfo.DriverName != storkvolume.KDMPDriverName {
 			continue
 		}
-		crName := getGenericCRName(prefixBackup, string(backup.UID), vInfo.PersistentVolumeClaimUID, vInfo.Namespace)
+		crName := getGenericCRName(utils.PrefixBackup, string(backup.UID), vInfo.PersistentVolumeClaimUID, vInfo.Namespace)
 		dataExport, err := kdmpShedOps.Instance().GetDataExport(crName, vInfo.Namespace)
 		if err != nil {
 			logrus.Errorf("failed to get backup DataExport CR: %v", err)
@@ -419,7 +403,7 @@ func isDataExportCompleted(status kdmpapi.ExportStatus) bool {
 
 func (k *kdmp) CancelBackup(backup *storkapi.ApplicationBackup) error {
 	for _, vInfo := range backup.Status.Volumes {
-		crName := getGenericCRName(prefixBackup, string(backup.UID), vInfo.PersistentVolumeClaimUID, vInfo.Namespace)
+		crName := getGenericCRName(utils.PrefixBackup, string(backup.UID), vInfo.PersistentVolumeClaimUID, vInfo.Namespace)
 		err := kdmpShedOps.Instance().DeleteDataExport(crName, vInfo.Namespace)
 		if err != nil && k8serror.IsNotFound(err) {
 			errMsg := fmt.Sprintf("failed to delete data export CR [%v]: %v", crName, err)
@@ -434,9 +418,9 @@ func (k *kdmp) DeleteBackup(backup *storkapi.ApplicationBackup) (bool, error) {
 	// For Applicationbackup CR created by px-backup, we want to handle deleting
 	// successful PVC (of in-progress backup) from px-backup deleteworker() to avoid two entities
 	// doing the delete of snapshot leading to races.
-	if val, ok := backup.Annotations[pxbackupAnnotationCreateByKey]; !ok {
+	if val, ok := backup.Annotations[utils.PxbackupAnnotationCreateByKey]; !ok {
 		return deleteKdmpSnapshot(backup)
-	} else if val != pxbackupAnnotationCreateByValue {
+	} else if val != utils.PxbackupAnnotationCreateByValue {
 		return deleteKdmpSnapshot(backup)
 	} else {
 		logrus.Infof("skipping snapshot deletion as ApplicationBackup CR [%v] is created by px-backup", backup.Name)
@@ -470,15 +454,15 @@ func deleteKdmpSnapshot(backup *storkapi.ApplicationBackup) (bool, error) {
 		if err != nil && k8serror.IsNotFound(err) {
 			// Adding required label for debugging
 			labels := make(map[string]string)
-			labels[applicationBackupCRNameKey] = getValidLabel(backup.Name)
-			labels[applicationBackupCRUIDKey] = getValidLabel(string(backup.UID))
-			labels[pvcNameKey] = getValidLabel(vInfo.PersistentVolumeClaim)
-			labels[pvcUIDKey] = getValidLabel(vInfo.PersistentVolumeClaimUID)
+			labels[utils.ApplicationBackupCRNameKey] = utils.GetValidLabel(backup.Name)
+			labels[utils.ApplicationBackupCRUIDKey] = utils.GetValidLabel(string(backup.UID))
+			labels[pvcNameKey] = utils.GetValidLabel(vInfo.PersistentVolumeClaim)
+			labels[pvcUIDKey] = utils.GetValidLabel(vInfo.PersistentVolumeClaimUID)
 			// If backup from px-backup, update the backup object details in the label
-			if val, ok := backup.Annotations[pxbackupAnnotationCreateByKey]; ok {
-				if val == pxbackupAnnotationCreateByValue {
-					labels[backupObjectNameKey] = getValidLabel(backup.Annotations[pxbackupObjectNameKey])
-					labels[backupObjectUIDKey] = getValidLabel(backup.Annotations[pxbackupObjectUIDKey])
+			if val, ok := backup.Annotations[utils.PxbackupAnnotationCreateByKey]; ok {
+				if val == utils.PxbackupAnnotationCreateByValue {
+					labels[utils.BackupObjectNameKey] = utils.GetValidLabel(backup.Annotations[utils.PxbackupObjectNameKey])
+					labels[utils.BackupObjectUIDKey] = utils.GetValidLabel(backup.Annotations[utils.PxbackupObjectUIDKey])
 				}
 			}
 			err := dataexport.CreateCredentialsSecret(secretName, backup.Spec.BackupLocation, backup.Namespace, backup.Namespace, labels)
@@ -727,22 +711,22 @@ func (k *kdmp) StartRestore(
 	// create VolumeBackup CR
 	// Adding required label for debugging
 	labels := make(map[string]string)
-	labels[applicationRestoreCRNameKey] = getValidLabel(restore.Name)
-	labels[applicationRestoreCRUIDKey] = getValidLabel(string(restore.UID))
-	labels[pvcNameKey] = getValidLabel(bkpvInfo.PersistentVolumeClaim)
-	labels[pvcUIDKey] = getValidLabel(bkpvInfo.PersistentVolumeClaimUID)
+	labels[applicationRestoreCRNameKey] = utils.GetValidLabel(restore.Name)
+	labels[applicationRestoreCRUIDKey] = utils.GetValidLabel(string(restore.UID))
+	labels[pvcNameKey] = utils.GetValidLabel(bkpvInfo.PersistentVolumeClaim)
+	labels[pvcUIDKey] = utils.GetValidLabel(bkpvInfo.PersistentVolumeClaimUID)
 	// If restorefrom px-backup, update the restore object details in the label
-	if val, ok := restore.Annotations[pxbackupAnnotationCreateByKey]; ok {
-		if val == pxbackupAnnotationCreateByValue {
-			labels[restoreObjectNameKey] = getValidLabel(restore.Annotations[pxbackupObjectNameKey])
-			labels[restoreObjectUIDKey] = getValidLabel(restore.Annotations[pxbackupObjectUIDKey])
+	if val, ok := restore.Annotations[utils.PxbackupAnnotationCreateByKey]; ok {
+		if val == utils.PxbackupAnnotationCreateByValue {
+			labels[restoreObjectNameKey] = utils.GetValidLabel(restore.Annotations[utils.PxbackupObjectNameKey])
+			labels[restoreObjectUIDKey] = utils.GetValidLabel(restore.Annotations[utils.PxbackupObjectUIDKey])
 		}
 	}
 	volBackup := &kdmpapi.VolumeBackup{}
 	volBackup.Labels = labels
 	volBackup.Annotations = make(map[string]string)
-	volBackup.Annotations[skipResourceAnnotation] = "true"
+	volBackup.Annotations[utils.SkipResourceAnnotation] = "true"
 	volBackup.Name = getGenericCRName(prefixRestore, string(restore.UID), bkpvInfo.PersistentVolumeClaimUID, restoreNamespace)
 	volBackup.Namespace = restoreNamespace
 	volBackup.Spec.BackupLocation = kdmpapi.DataExportObjectReference{
@@ -781,8 +765,8 @@ func (k *kdmp) StartRestore(
 	}
 	dataExport.Spec.TriggeredFromNs = storkPodNs
 	dataExport.Annotations = make(map[string]string)
-	dataExport.Annotations[skipResourceAnnotation] = "true"
-	dataExport.Annotations[backupObjectUIDKey] = backupUID
+	dataExport.Annotations[utils.SkipResourceAnnotation] = "true"
+	dataExport.Annotations[utils.BackupObjectUIDKey] = backupUID
 	dataExport.Annotations[pvcUIDKey] = bkpvInfo.PersistentVolumeClaimUID
 	dataExport.Name = getGenericCRName(prefixRestore, string(restore.UID), bkpvInfo.PersistentVolumeClaimUID, restoreNamespace)
 	dataExport.Namespace = restoreNamespace
@@ -951,7 +935,7 @@ func (k *kdmp) CleanupBackupResources(backup *storkapi.ApplicationBackup) error
 		if vInfo.DriverName != storkvolume.KDMPDriverName {
 			continue
 		}
-		crName := getGenericCRName(prefixBackup, string(backup.UID), vInfo.PersistentVolumeClaimUID, vInfo.Namespace)
+		crName := getGenericCRName(utils.PrefixBackup, string(backup.UID), vInfo.PersistentVolumeClaimUID, vInfo.Namespace)
 		logrus.Infof("deleting data export CR: %s%s", vInfo.Namespace, crName)
 		de, err := kdmpShedOps.Instance().GetDataExport(crName, vInfo.Namespace)
 		if err != nil && !k8serror.IsNotFound(err) {
@@ -1029,27 +1013,6 @@ func init() {
 	}
 }
 
-// getValidLabel - will validate the label to make sure the length is less than 63 and contains valid label format.
-// If the length is greater than 63, it will truncate it to 63 characters.
-func getValidLabel(labelVal string) string {
-	if len(labelVal) > validation.LabelValueMaxLength {
-		labelVal = truncate.Truncate(labelVal, validation.LabelValueMaxLength, "", truncate.PositionEnd)
-		// make sure the truncated value does not end with the hyphen.
-		labelVal = strings.Trim(labelVal, "-")
-		// make sure the truncated value does not end with the dot.
-		labelVal = strings.Trim(labelVal, ".")
-	}
-	return labelVal
-}
-
-// getShortUID returns the first part of the UID
-func getShortUID(uid string) string {
-	if len(uid) < 8 {
-		return ""
-	}
-	return uid[0:7]
-}
-
 // getVolumeSnapshotClassFromBackupVolumeInfo returns the volumesnapshotclass if it is present
 func getVolumeSnapshotClassFromBackupVolumeInfo(bkvpInfo *storkapi.ApplicationBackupVolumeInfo) string {
 	var vsClass string
- labelVal = strings.Trim(labelVal, ".") - } - return labelVal -} - func getResourceExportCRName(opsPrefix, crUID, ns string) string { - name := fmt.Sprintf("%s-%s-%s", opsPrefix, getShortUID(crUID), ns) - name = getValidLabel(name) + name := fmt.Sprintf("%s-%s-%s", opsPrefix, utils.GetShortUID(crUID), ns) + name = utils.GetValidLabel(name) return name } @@ -1377,7 +1338,7 @@ func (a *ApplicationBackupController) backupResources( if nfs { // Check whether ResourceExport is preset or not - crName := getResourceExportCRName(prefixBackup, string(backup.UID), backup.Namespace) + crName := getResourceExportCRName(utils.PrefixBackup, string(backup.UID), backup.Namespace) resourceExport, err := kdmpShedOps.Instance().GetResourceExport(crName, backup.Namespace) if err != nil { if k8s_errors.IsNotFound(err) { @@ -1385,19 +1346,19 @@ func (a *ApplicationBackupController) backupResources( resourceExport := &kdmpapi.ResourceExport{} // Adding required label for debugging labels := make(map[string]string) - labels[applicationBackupCRNameKey] = getValidLabel(backup.Name) - labels[applicationBackupCRUIDKey] = getValidLabel(getShortUID(string(backup.UID))) + labels[utils.ApplicationBackupCRNameKey] = utils.GetValidLabel(backup.Name) + labels[utils.ApplicationBackupCRUIDKey] = utils.GetValidLabel(utils.GetShortUID(string(backup.UID))) // If backup from px-backup, update the backup object details in the label - if val, ok := backup.Annotations[pxbackupAnnotationCreateByKey]; ok { - if val == pxbackupAnnotationCreateByValue { - labels[backupObjectNameKey] = getValidLabel(backup.Annotations[pxbackupObjectNameKey]) - labels[backupObjectUIDKey] = getValidLabel(backup.Annotations[pxbackupObjectUIDKey]) + if val, ok := backup.Annotations[utils.PxbackupAnnotationCreateByKey]; ok { + if val == utils.PxbackupAnnotationCreateByValue { + labels[utils.BackupObjectNameKey] = utils.GetValidLabel(backup.Annotations[utils.PxbackupObjectNameKey]) + labels[utils.BackupObjectUIDKey] = utils.GetValidLabel(backup.Annotations[utils.PxbackupObjectUIDKey]) } } resourceExport.Labels = labels resourceExport.Annotations = make(map[string]string) - resourceExport.Annotations[skipResourceAnnotation] = "true" - resourceExport.Name = getResourceExportCRName(prefixBackup, string(backup.UID), backup.Namespace) + resourceExport.Annotations[utils.SkipResourceAnnotation] = "true" + resourceExport.Name = getResourceExportCRName(utils.PrefixBackup, string(backup.UID), backup.Namespace) resourceExport.Namespace = backup.Namespace resourceExport.Spec.Type = kdmpapi.ResourceExportBackup source := &kdmpapi.ResourceExportObjectReference{ @@ -1671,7 +1632,7 @@ func (a *ApplicationBackupController) cleanupResources( } // Directly calling DeleteResourceExport with out checking backuplocation type. 
// For other backuplocation type, expecting Notfound - crName := getResourceExportCRName(prefixBackup, string(backup.UID), backup.Namespace) + crName := getResourceExportCRName(utils.PrefixBackup, string(backup.UID), backup.Namespace) err := kdmpShedOps.Instance().DeleteResourceExport(crName, backup.Namespace) if err != nil && !k8s_errors.IsNotFound(err) { errMsg := fmt.Sprintf("failed to delete data export CR [%v]: %v", crName, err) diff --git a/pkg/applicationmanager/controllers/applicationrestore.go b/pkg/applicationmanager/controllers/applicationrestore.go index aaf355175e..a9f39609ad 100644 --- a/pkg/applicationmanager/controllers/applicationrestore.go +++ b/pkg/applicationmanager/controllers/applicationrestore.go @@ -18,6 +18,7 @@ import ( "github.com/libopenstorage/stork/pkg/log" "github.com/libopenstorage/stork/pkg/objectstore" "github.com/libopenstorage/stork/pkg/resourcecollector" + "github.com/libopenstorage/stork/pkg/utils" "github.com/libopenstorage/stork/pkg/version" kdmpapi "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1" "github.com/portworx/sched-ops/k8s/apiextensions" @@ -1276,7 +1277,7 @@ func (a *ApplicationRestoreController) restoreResources( } } else { // Check whether ResourceExport is preset or not - crName := getResourceExportCRName(prefixRestore, string(restore.UID), restore.Namespace) + crName := getResourceExportCRName(utils.PrefixRestore, string(restore.UID), restore.Namespace) resourceExport, err := kdmpShedOps.Instance().GetResourceExport(crName, restore.Namespace) if err != nil { if k8s_errors.IsNotFound(err) { @@ -1284,18 +1285,18 @@ func (a *ApplicationRestoreController) restoreResources( resourceExport := &kdmpapi.ResourceExport{} // Adding required label for debugging labels := make(map[string]string) - labels[applicationRestoreCRNameKey] = getValidLabel(restore.Name) - labels[applicationRestoreCRUIDKey] = getValidLabel(getShortUID(string(restore.UID))) + labels[utils.ApplicationRestoreCRNameKey] = utils.GetValidLabel(restore.Name) + labels[utils.ApplicationRestoreCRUIDKey] = utils.GetValidLabel(utils.GetShortUID(string(restore.UID))) // If restore from px-backup, update the restore object details in the label - if val, ok := backup.Annotations[pxbackupAnnotationCreateByKey]; ok { - if val == pxbackupAnnotationCreateByValue { - labels[restoreObjectNameKey] = getValidLabel(backup.Annotations[pxbackupObjectNameKey]) - labels[restoreObjectUIDKey] = getValidLabel(backup.Annotations[pxbackupObjectUIDKey]) + if val, ok := backup.Annotations[utils.PxbackupAnnotationCreateByKey]; ok { + if val == utils.PxbackupAnnotationCreateByValue { + labels[utils.RestoreObjectNameKey] = utils.GetValidLabel(backup.Annotations[utils.PxbackupObjectNameKey]) + labels[utils.RestoreObjectUIDKey] = utils.GetValidLabel(backup.Annotations[utils.PxbackupObjectUIDKey]) } } resourceExport.Labels = labels resourceExport.Annotations = make(map[string]string) - resourceExport.Annotations[skipResourceAnnotation] = "true" + resourceExport.Annotations[utils.SkipResourceAnnotation] = "true" resourceExport.Name = crName resourceExport.Namespace = restore.Namespace resourceExport.Spec.Type = kdmpapi.ResourceExportBackup @@ -1524,7 +1525,7 @@ func (a *ApplicationRestoreController) cleanupResources(restore *storkapi.Applic } // Directly calling DeleteResourceExport with out checking backuplocation type. 
// For other backuplocation type, expecting Notfound - crName := getResourceExportCRName(prefixBackup, string(restore.UID), restore.Namespace) + crName := getResourceExportCRName(utils.PrefixBackup, string(restore.UID), restore.Namespace) err := kdmpShedOps.Instance().DeleteResourceExport(crName, restore.Namespace) if err != nil && !k8s_errors.IsNotFound(err) { errMsg := fmt.Sprintf("failed to delete data export CR [%v]: %v", crName, err) diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 33b575490c..063f30b6a5 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -6,6 +6,8 @@ import ( "github.com/portworx/sched-ops/k8s/core" "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" + "github.com/aquilax/truncate" + "k8s.io/apimachinery/pkg/util/validation" "strings" ) @@ -19,6 +21,44 @@ const ( PXIncrementalCountAnnotation = "portworx.io/cloudsnap-incremental-count" // trimCRDGroupNameKey - groups name containing the string from this configmap field will be trimmed trimCRDGroupNameKey = "TRIM_CRD_GROUP_NAME" + + // PrefixBackup - prefix string that will be used for the kdmp backup job + PrefixBackup = "backup" + // PrefixRestore prefix string that will be used for the kdmp restore job + PrefixRestore = "restore" + + // KdmpAnnotationPrefix - KDMP annotation prefix + KdmpAnnotationPrefix = "kdmp.portworx.com/" + // ApplicationBackupCRNameKey - key name to store the applicationbackup CR name with KDMP annotation prefix + ApplicationBackupCRNameKey = KdmpAnnotationPrefix + "applicationbackup-cr-name" + // ApplicationBackupCRUIDKey - key name to store the applicationbackup CR UID with KDMP annotation prefix + ApplicationBackupCRUIDKey = KdmpAnnotationPrefix + "applicationbackup-cr-uid" + // BackupObjectNameKey - annotation key value for backup object name with KDMP annotation prefix + BackupObjectNameKey = KdmpAnnotationPrefix + "backupobject-name" + // BackupObjectUIDKey - annotation key value for backup object UID with KDMP annotation prefix + BackupObjectUIDKey = KdmpAnnotationPrefix + "backupobject-uid" + // ApplicationRestoreCRNameKey - key name to store the applicationrestore CR name with KDMP annotation prefix + ApplicationRestoreCRNameKey = KdmpAnnotationPrefix + "applicationrestore-cr-name" + // ApplicationRestoreCRUIDKey - key name to store the applicationrestore CR UID with KDMP annotation prefix + ApplicationRestoreCRUIDKey = KdmpAnnotationPrefix + "applicationrestore-cr-uid" + // RestoreObjectNameKey - key name to store the restore object name with KDMP annotation prefix + RestoreObjectNameKey = KdmpAnnotationPrefix + "restoreobject-name" + // RestoreObjectUIDKey - key name to store the restore object UID with KDMP annotation prefix + RestoreObjectUIDKey = KdmpAnnotationPrefix + "restoreobject-uid" + + // PxbackupAnnotationPrefix - px-backup annotation prefix + PxbackupAnnotationPrefix = "portworx.io/" + // PxbackupAnnotationCreateByKey - annotation key name to indicate whether the CR was created by px-backup or stork + PxbackupAnnotationCreateByKey = PxbackupAnnotationPrefix + "created-by" + // PxbackupAnnotationCreateByValue - annotation key value for create-by key for px-backup + PxbackupAnnotationCreateByValue = "px-backup" + + // PxbackupObjectUIDKey -annotation key name for backup object UID with px-backup prefix + PxbackupObjectUIDKey = PxbackupAnnotationPrefix + "backup-uid" + // PxbackupObjectNameKey - annotation key name for backup object name with px-backup prefix + PxbackupObjectNameKey = PxbackupAnnotationPrefix + "backup-name" + // SkipResourceAnnotation - 
annotation value to skip resource during resource collector
+	SkipResourceAnnotation = "stork.libopenstorage.org/skip-resource"
 )
 
 // ParseKeyValueList parses a list of key=values string into a map
@@ -82,3 +122,24 @@ func GetStorageClassNameForPVC(pvc *v1.PersistentVolumeClaim) (string, error) {
 	}
 	return scName, nil
 }
+
+// GetValidLabel - will validate the label to make sure the length is less 63 and contains valid label format.
+// If the length is greater then 63, it will truncate to 63 character.
+func GetValidLabel(labelVal string) string {
+	if len(labelVal) > validation.LabelValueMaxLength {
+		labelVal = truncate.Truncate(labelVal, validation.LabelValueMaxLength, "", truncate.PositionEnd)
+		// make sure the truncated value does not end with the hyphen.
+		labelVal = strings.Trim(labelVal, "-")
+		// make sure the truncated value does not end with the dot.
+		labelVal = strings.Trim(labelVal, ".")
+	}
+	return labelVal
+}
+
+// GetShortUID returns the first part of the UID
+func GetShortUID(uid string) string {
+	if len(uid) < 8 {
+		return ""
+	}
+	return uid[0:7]
+}

From 03441f9115bab24fa24d9992743e47a3838dbb36 Mon Sep 17 00:00:00 2001
From: sivakumar subraani
Date: Mon, 26 Sep 2022 17:10:55 +0000
Subject: [PATCH 61/97] Fixed the issue in accessing Resources from the Status
 of the ResourceExport definition.

---
 pkg/applicationmanager/controllers/applicationrestore.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/applicationmanager/controllers/applicationrestore.go b/pkg/applicationmanager/controllers/applicationrestore.go
index a9f39609ad..935c970afd 100644
--- a/pkg/applicationmanager/controllers/applicationrestore.go
+++ b/pkg/applicationmanager/controllers/applicationrestore.go
@@ -1354,7 +1354,7 @@ func (a *ApplicationRestoreController) restoreResources(
 			return err
 		case kdmpapi.ResourceExportStatusSuccessful:
 			// Modify to have subresource level updating
-			for _, resource := range resourceExport.Spec.Resources {
+			for _, resource := range resourceExport.Status.Resources {
 				a.updateResourceStatusFromRestoreCR(
 					restore,
 					resource,

From d2ff8dae77caa90ecc190a247a7cc1518b14eea6 Mon Sep 17 00:00:00 2001
From: Lalatendu Das
Date: Mon, 3 Oct 2022 07:21:42 +0000
Subject: [PATCH 62/97] pb-3100: Enable Generic backup for NFS based backuplocation

If the backuplocation type is NFS, then we will take a Generic backup for any type of volume.
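The override happens in backupVolumes() before volume.GetPVCDriverForBackup() picks a driver. A minimal sketch of the rule, using the stork_api alias from applicationbackup.go; resolveBackupDriverType is a purely illustrative helper name, the actual change inlines this logic:

func resolveBackupDriverType(backupLocation *stork_api.BackupLocation, driverType string) string {
	// Cloud-snapshot drivers cannot write to an NFS share, so every volume
	// on an NFS backuplocation is routed to the generic (KDMP) path,
	// including Portworx volumes.
	if backupLocation.Location.Type == stork_api.BackupLocationNFS {
		return stork_api.ApplicationBackupGeneric
	}
	return driverType
}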
Signed-off-by: Lalatendu Das --- drivers/volume/portworx/portworx.go | 4 ++++ pkg/applicationmanager/controllers/applicationbackup.go | 9 +++++++++ 2 files changed, 13 insertions(+) diff --git a/drivers/volume/portworx/portworx.go b/drivers/volume/portworx/portworx.go index 9bd73b5b24..baacad5e17 100644 --- a/drivers/volume/portworx/portworx.go +++ b/drivers/volume/portworx/portworx.go @@ -677,6 +677,10 @@ func (p *portworx) GetClusterID() (string, error) { } func (p *portworx) OwnsPVCForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string) bool { + if cmBackupType == storkapi.ApplicationBackupGeneric { + // If user has forced the backupType in config map, default to generic always + return false + } return p.IsSupportedPVC(coreOps, pvc, true) } diff --git a/pkg/applicationmanager/controllers/applicationbackup.go b/pkg/applicationmanager/controllers/applicationbackup.go index 23833343ee..bf2027cb95 100644 --- a/pkg/applicationmanager/controllers/applicationbackup.go +++ b/pkg/applicationmanager/controllers/applicationbackup.go @@ -568,6 +568,15 @@ func (a *ApplicationBackupController) backupVolumes(backup *stork_api.Applicatio continue } var driverName string + backupLocation, err := storkops.Instance().GetBackupLocation(backup.Spec.BackupLocation, backup.Namespace) + if err != nil { + return err + } + // Generic Backup type is forced for all backup taken on a NFS backuplocation. + // This change will make portworx volume also to follow kdmp path. + if backupLocation.Location.Type == stork_api.BackupLocationNFS { + driverType = stork_api.ApplicationBackupGeneric + } driverName, err = volume.GetPVCDriverForBackup(core.Instance(), &pvc, driverType, backup.Spec.BackupType) if err != nil { // Skip unsupported PVCs From 0779be70f128fd4aa2c3721d5e318b2d476cae59 Mon Sep 17 00:00:00 2001 From: Prashanth Kumar Date: Mon, 10 Oct 2022 07:03:17 +0000 Subject: [PATCH 63/97] Temporarily disabling namespace creation in vol restore path --- pkg/applicationmanager/controllers/applicationrestore.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/applicationmanager/controllers/applicationrestore.go b/pkg/applicationmanager/controllers/applicationrestore.go index 935c970afd..2411f81d92 100644 --- a/pkg/applicationmanager/controllers/applicationrestore.go +++ b/pkg/applicationmanager/controllers/applicationrestore.go @@ -279,6 +279,7 @@ func (a *ApplicationRestoreController) handle(ctx context.Context, restore *stor return nil } + /* TODO: Temp disabling it, would move to nfs path with restore vol changes err = a.verifyNamespaces(restore) if err != nil { log.ApplicationRestoreLog(restore).Errorf(err.Error()) @@ -287,7 +288,7 @@ func (a *ApplicationRestoreController) handle(ctx context.Context, restore *stor string(storkapi.ApplicationRestoreStatusFailed), err.Error()) return nil - } + }*/ switch restore.Status.Stage { case storkapi.ApplicationRestoreStageInitial: From bb614f8c6681af11214b81f6b5a0e8a59349fa85 Mon Sep 17 00:00:00 2001 From: diptiranjan Date: Wed, 12 Oct 2022 18:57:51 +0530 Subject: [PATCH 64/97] PB-3155: Updating the success status in application restore CR from resource export CR. 
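The controller now waits for the ResourceExport CR to reach a terminal state before finalizing the ApplicationRestore CR and cleaning up the generic restore CRs. A condensed restatement of the new gating follows; it is not the literal hunk, and status propagation plus error handling are elided:

doCleanup := true
switch resourceExport.Status.Status {
case kdmpapi.ResourceExportStatusSuccessful:
	// per-resource statuses are copied into the ApplicationRestore CR
	// and the restore is marked successful
case kdmpapi.ResourceExportStatusFailed:
	// a failure message is recorded for the restore CR
case kdmpapi.ResourceExportStatusInitial,
	kdmpapi.ResourceExportStatusPending,
	kdmpapi.ResourceExportStatusInProgress:
	// not terminal yet: skip cleanup and poll again on the next reconcile
	doCleanup = false
}
if !doCleanup {
	return nil
}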
--- .../controllers/applicationrestore.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/pkg/applicationmanager/controllers/applicationrestore.go b/pkg/applicationmanager/controllers/applicationrestore.go index 2411f81d92..c100cc2d56 100644 --- a/pkg/applicationmanager/controllers/applicationrestore.go +++ b/pkg/applicationmanager/controllers/applicationrestore.go @@ -1266,6 +1266,7 @@ func (a *ApplicationRestoreController) restoreResources( logrus.Errorf("error in checking backuplocation type") } + doCleanup := true if !nfs { objects, err := a.downloadResources(backup, restore.Spec.BackupLocation, restore.Namespace) if err != nil { @@ -1336,6 +1337,7 @@ func (a *ApplicationRestoreController) restoreResources( } else { var message string // Check the status of the resourceExport CR and update it to the applicationBackup CR + logrus.Debugf("resource export: %s, status: %s", resourceExport.Name, resourceExport.Status.Status) switch resourceExport.Status.Status { case kdmpapi.ResourceExportStatusFailed: message = fmt.Sprintf("Error applying resources: %v", err) @@ -1362,19 +1364,29 @@ func (a *ApplicationRestoreController) restoreResources( storkapi.ApplicationRestoreStatusType(resource.Status), resource.Reason) } + restore.Status.Stage = storkapi.ApplicationRestoreStageFinal + restore.Status.FinishTimestamp = metav1.Now() + restore.Status.Status = storkapi.ApplicationRestoreStatusSuccessful + restore.Status.Reason = "Volumes and resources were restored up successfully" case kdmpapi.ResourceExportStatusInitial: + doCleanup = false case kdmpapi.ResourceExportStatusPending: + doCleanup = false case kdmpapi.ResourceExportStatusInProgress: restore.Status.LastUpdateTimestamp = metav1.Now() + doCleanup = false } err = a.client.Update(context.TODO(), restore) if err != nil { return err } - return nil } } + + if !doCleanup { + return nil + } // Before updating to final stage, cleanup generic backup CRs, if any. err = a.cleanupResources(restore) if err != nil { @@ -1526,7 +1538,7 @@ func (a *ApplicationRestoreController) cleanupResources(restore *storkapi.Applic } // Directly calling DeleteResourceExport with out checking backuplocation type. 
// For other backuplocation type, expecting Notfound
-	crName := getResourceExportCRName(utils.PrefixBackup, string(restore.UID), restore.Namespace)
+	crName := getResourceExportCRName(utils.PrefixRestore, string(restore.UID), restore.Namespace)
 	err := kdmpShedOps.Instance().DeleteResourceExport(crName, restore.Namespace)
 	if err != nil && !k8s_errors.IsNotFound(err) {
 		errMsg := fmt.Sprintf("failed to delete data export CR [%v]: %v", crName, err)

From 295578db20030c45cf26e905c0a30d6297fd15b8 Mon Sep 17 00:00:00 2001
From: sivakumar subraani
Date: Thu, 20 Oct 2022 18:41:23 +0000
Subject: [PATCH 65/97] Commented out the namespace-related APIs temporarily

---
 pkg/applicationmanager/controllers/applicationrestore.go | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/pkg/applicationmanager/controllers/applicationrestore.go b/pkg/applicationmanager/controllers/applicationrestore.go
index c100cc2d56..f7dd1ee99b 100644
--- a/pkg/applicationmanager/controllers/applicationrestore.go
+++ b/pkg/applicationmanager/controllers/applicationrestore.go
@@ -106,6 +106,7 @@ func (a *ApplicationRestoreController) setDefaults(restore *storkapi.Application
 	return nil
 }
+/*
 func (a *ApplicationRestoreController) verifyNamespaces(restore *storkapi.ApplicationRestore) error {
 	// Check whether namespace is allowed to be restored to before each stage
 	// Restrict restores to only the namespace that the object belongs
@@ -220,7 +221,7 @@ func (a *ApplicationRestoreController) createNamespaces(backup *storkapi.Applica
 	}
 	return nil
 }
-
+*/
 // Reconcile updates for ApplicationRestore objects.
 func (a *ApplicationRestoreController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
 	logrus.Infof("Reconciling ApplicationRestore %s/%s", request.Namespace, request.Name)
@@ -330,6 +331,7 @@ func (a *ApplicationRestoreController) handle(ctx context.Context, restore *stor
 	return nil
 }
+/*
 func (a *ApplicationRestoreController) namespaceRestoreAllowed(restore *storkapi.ApplicationRestore) bool {
 	// Restrict restores to only the namespace that the object belongs
 	// except for the namespace designated by the admin
@@ -342,7 +344,7 @@ func (a *ApplicationRestoreController) namespaceRestoreAllowed(restore *storkapi
 	}
 	return true
 }
-
+*/
 func (a *ApplicationRestoreController) getDriversForRestore(restore *storkapi.ApplicationRestore) map[string]bool {
 	drivers := make(map[string]bool)
 	for _, volumeInfo := range restore.Status.Volumes {

From 201228afb95fbcc3d9437751aad5d4dd39fda6de Mon Sep 17 00:00:00 2001
From: diptiranjan
Date: Thu, 20 Oct 2022 19:39:46 +0530
Subject: [PATCH 66/97] PB-3144: Not creating namespaces for nfs backuplocation type in controller.
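Besides skipping namespace creation, the ResourceExport CRs for the resource phase now live in the admin namespace (a.backupAdminNamespace on backup, a.restoreAdminNamespace on restore) instead of the application CR's namespace. The restore-side gating, condensed from the hunks below with event recording elided:

nfs, err := IsNFSBackuplocationType(backup)
if err != nil {
	logrus.Errorf("error in checking backuplocation type")
}
if !nfs {
	// Namespace verification (and creation) runs only on the legacy,
	// non-NFS path; the NFS flow is expected to handle it later in the
	// restore sequence.
	if err := a.verifyNamespaces(restore); err != nil {
		return nil
	}
}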
--- .../controllers/applicationbackup.go | 6 ++-- .../controllers/applicationrestore.go | 36 ++++++++++++------- 2 files changed, 27 insertions(+), 15 deletions(-) diff --git a/pkg/applicationmanager/controllers/applicationbackup.go b/pkg/applicationmanager/controllers/applicationbackup.go index bf2027cb95..e17c540fc1 100644 --- a/pkg/applicationmanager/controllers/applicationbackup.go +++ b/pkg/applicationmanager/controllers/applicationbackup.go @@ -1348,7 +1348,7 @@ func (a *ApplicationBackupController) backupResources( if nfs { // Check whether ResourceExport is preset or not crName := getResourceExportCRName(utils.PrefixBackup, string(backup.UID), backup.Namespace) - resourceExport, err := kdmpShedOps.Instance().GetResourceExport(crName, backup.Namespace) + resourceExport, err := kdmpShedOps.Instance().GetResourceExport(crName, a.backupAdminNamespace) if err != nil { if k8s_errors.IsNotFound(err) { // create resource export CR @@ -1368,7 +1368,7 @@ func (a *ApplicationBackupController) backupResources( resourceExport.Annotations = make(map[string]string) resourceExport.Annotations[utils.SkipResourceAnnotation] = "true" resourceExport.Name = getResourceExportCRName(utils.PrefixBackup, string(backup.UID), backup.Namespace) - resourceExport.Namespace = backup.Namespace + resourceExport.Namespace = a.backupAdminNamespace resourceExport.Spec.Type = kdmpapi.ResourceExportBackup source := &kdmpapi.ResourceExportObjectReference{ APIVersion: backup.APIVersion, @@ -1642,7 +1642,7 @@ func (a *ApplicationBackupController) cleanupResources( // Directly calling DeleteResourceExport with out checking backuplocation type. // For other backuplocation type, expecting Notfound crName := getResourceExportCRName(utils.PrefixBackup, string(backup.UID), backup.Namespace) - err := kdmpShedOps.Instance().DeleteResourceExport(crName, backup.Namespace) + err := kdmpShedOps.Instance().DeleteResourceExport(crName, a.backupAdminNamespace) if err != nil && !k8s_errors.IsNotFound(err) { errMsg := fmt.Sprintf("failed to delete data export CR [%v]: %v", crName, err) log.ApplicationBackupLog(backup).Errorf("%v", errMsg) diff --git a/pkg/applicationmanager/controllers/applicationrestore.go b/pkg/applicationmanager/controllers/applicationrestore.go index f7dd1ee99b..bd674a5337 100644 --- a/pkg/applicationmanager/controllers/applicationrestore.go +++ b/pkg/applicationmanager/controllers/applicationrestore.go @@ -280,16 +280,28 @@ func (a *ApplicationRestoreController) handle(ctx context.Context, restore *stor return nil } - /* TODO: Temp disabling it, would move to nfs path with restore vol changes - err = a.verifyNamespaces(restore) + backup, err := storkops.Instance().GetApplicationBackup(restore.Spec.BackupName, restore.Namespace) if err != nil { - log.ApplicationRestoreLog(restore).Errorf(err.Error()) - a.recorder.Event(restore, - v1.EventTypeWarning, - string(storkapi.ApplicationRestoreStatusFailed), - err.Error()) - return nil - }*/ + log.ApplicationRestoreLog(restore).Errorf("Error getting backup: %v", err) + return err + } + + nfs, err := IsNFSBackuplocationType(backup) + if err != nil { + logrus.Errorf("error in checking backuplocation type") + } + + if !nfs { + err = a.verifyNamespaces(restore) + if err != nil { + log.ApplicationRestoreLog(restore).Errorf(err.Error()) + a.recorder.Event(restore, + v1.EventTypeWarning, + string(storkapi.ApplicationRestoreStatusFailed), + err.Error()) + return nil + } + } switch restore.Status.Stage { case storkapi.ApplicationRestoreStageInitial: @@ -1282,7 +1294,7 @@ func (a 
*ApplicationRestoreController) restoreResources( } else { // Check whether ResourceExport is preset or not crName := getResourceExportCRName(utils.PrefixRestore, string(restore.UID), restore.Namespace) - resourceExport, err := kdmpShedOps.Instance().GetResourceExport(crName, restore.Namespace) + resourceExport, err := kdmpShedOps.Instance().GetResourceExport(crName, a.restoreAdminNamespace) if err != nil { if k8s_errors.IsNotFound(err) { // create resource export CR @@ -1302,7 +1314,7 @@ func (a *ApplicationRestoreController) restoreResources( resourceExport.Annotations = make(map[string]string) resourceExport.Annotations[utils.SkipResourceAnnotation] = "true" resourceExport.Name = crName - resourceExport.Namespace = restore.Namespace + resourceExport.Namespace = a.restoreAdminNamespace resourceExport.Spec.Type = kdmpapi.ResourceExportBackup source := &kdmpapi.ResourceExportObjectReference{ APIVersion: restore.APIVersion, @@ -1541,7 +1553,7 @@ func (a *ApplicationRestoreController) cleanupResources(restore *storkapi.Applic // Directly calling DeleteResourceExport with out checking backuplocation type. // For other backuplocation type, expecting Notfound crName := getResourceExportCRName(utils.PrefixRestore, string(restore.UID), restore.Namespace) - err := kdmpShedOps.Instance().DeleteResourceExport(crName, restore.Namespace) + err := kdmpShedOps.Instance().DeleteResourceExport(crName, a.restoreAdminNamespace) if err != nil && !k8s_errors.IsNotFound(err) { errMsg := fmt.Sprintf("failed to delete data export CR [%v]: %v", crName, err) log.ApplicationRestoreLog(restore).Errorf("%v", errMsg) From 6a654942a4efd9c776d0f57d22e28f462fb2826d Mon Sep 17 00:00:00 2001 From: sivakumar subraani Date: Wed, 2 Nov 2022 06:13:54 +0000 Subject: [PATCH 67/97] Uncommented the definition of namespaceRestoreAllowed and verifyNamespaces to avoid compilation error --- pkg/applicationmanager/controllers/applicationrestore.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pkg/applicationmanager/controllers/applicationrestore.go b/pkg/applicationmanager/controllers/applicationrestore.go index bd674a5337..b2e1ffad7a 100644 --- a/pkg/applicationmanager/controllers/applicationrestore.go +++ b/pkg/applicationmanager/controllers/applicationrestore.go @@ -106,7 +106,6 @@ func (a *ApplicationRestoreController) setDefaults(restore *storkapi.Application return nil } -/* func (a *ApplicationRestoreController) verifyNamespaces(restore *storkapi.ApplicationRestore) error { // Check whether namespace is allowed to be restored to before each stage // Restrict restores to only the namespace that the object belongs @@ -221,7 +220,7 @@ func (a *ApplicationRestoreController) createNamespaces(backup *storkapi.Applica } return nil } -*/ + // Reconcile updates for ApplicationRestore objects. 
func (a *ApplicationRestoreController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
 	logrus.Infof("Reconciling ApplicationRestore %s/%s", request.Namespace, request.Name)
@@ -343,7 +342,6 @@ func (a *ApplicationRestoreController) handle(ctx context.Context, restore *stor
 	return nil
 }
-/*
 func (a *ApplicationRestoreController) namespaceRestoreAllowed(restore *storkapi.ApplicationRestore) bool {
 	// Restrict restores to only the namespace that the object belongs
 	// except for the namespace designated by the admin
@@ -356,7 +354,6 @@ func (a *ApplicationRestoreController) namespaceRestoreAllowed(restore *storkapi
 	}
 	return true
 }
-*/
 func (a *ApplicationRestoreController) getDriversForRestore(restore *storkapi.ApplicationRestore) map[string]bool {
 	drivers := make(map[string]bool)
 	for _, volumeInfo := range restore.Status.Volumes {

From 12447294710433804589317b64cdd88534c00289 Mon Sep 17 00:00:00 2001
From: sivakumar subraani
Date: Wed, 2 Nov 2022 07:24:25 +0000
Subject: [PATCH 68/97] Fixed a compilation error that came in as part of the
 merge and rebase

---
 pkg/applicationmanager/controllers/applicationbackup.go | 1 -
 pkg/utils/utils.go | 2 +-
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/pkg/applicationmanager/controllers/applicationbackup.go b/pkg/applicationmanager/controllers/applicationbackup.go
index e17c540fc1..977c6d5ce3 100644
--- a/pkg/applicationmanager/controllers/applicationbackup.go
+++ b/pkg/applicationmanager/controllers/applicationbackup.go
@@ -4,7 +4,6 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"github.com/libopenstorage/stork/pkg/utils"
 	"math"
 	"os"
 	"path/filepath"
diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go
index 063f30b6a5..90a862791b 100644
--- a/pkg/utils/utils.go
+++ b/pkg/utils/utils.go
@@ -2,11 +2,11 @@ package utils
 
 import (
 	"fmt"
+	"github.com/aquilax/truncate"
 	"github.com/libopenstorage/stork/drivers"
 	"github.com/portworx/sched-ops/k8s/core"
 	"github.com/sirupsen/logrus"
 	v1 "k8s.io/api/core/v1"
-	"github.com/aquilax/truncate"
 	"k8s.io/apimachinery/pkg/util/validation"
 	"strings"
 )

From 680ba0c9a332ad2819b93945c34aa046c468f5a5 Mon Sep 17 00:00:00 2001
From: Prashanth Kumar
Date: Wed, 26 Oct 2022 18:41:55 +0000
Subject: [PATCH 69/97] NFS volume restore implementation

- NFS vol restore support
- Support for creating restore pvc using nfs executor job
- Triggering kdmp restore after pvc creation for vol restore

---
 drivers/volume/kdmp/kdmp.go | 42 +--
 drivers/volume/portworx/portworx.go | 1 -
 .../controllers/applicationrestore.go | 311 ++++++++++++++----
 pkg/utils/utils.go | 2 +
 4 files changed, 268 insertions(+), 88 deletions(-)

diff --git a/drivers/volume/kdmp/kdmp.go b/drivers/volume/kdmp/kdmp.go
index fec46b2036..76d250a3e7 100644
--- a/drivers/volume/kdmp/kdmp.go
+++ b/drivers/volume/kdmp/kdmp.go
@@ -637,6 +637,7 @@ func (k *kdmp) StartRestore(
 	volumeBackupInfos []*storkapi.ApplicationBackupVolumeInfo,
 	objects []runtime.Unstructured,
 ) ([]*storkapi.ApplicationRestoreVolumeInfo, error) {
+	funct := "kdmp.StartRestore"
 	log.ApplicationRestoreLog(restore).Debugf("started generic restore: %v", restore.Name)
 	volumeInfos := make([]*storkapi.ApplicationRestoreVolumeInfo, 0)
 	nodes, err := core.Instance().GetNodes()
 	if err != nil {
 		return 
nil, err - } - if !nonSupportedProvider { - for _, node := range nodes.Items { - zone := node.Labels[v1.LabelTopologyZone] - if zone == destFullZoneName { - pvc.Annotations[storageNodeAnnotation] = node.Name - } - } - } - val, ok := restore.Spec.NamespaceMapping[bkpvInfo.Namespace] if !ok { return nil, fmt.Errorf("restore namespace mapping not found: %s", bkpvInfo.Namespace) } restoreNamespace := val + pvc := &v1.PersistentVolumeClaim{} + + if objects != nil { + // get corresponding pvc object from objects list + pvc, err = storkvolume.GetPVCFromObjects(objects, bkpvInfo) + if err != nil { + return nil, err + } + if !nonSupportedProvider { + for _, node := range nodes.Items { + zone := node.Labels[v1.LabelTopologyZone] + if zone == destFullZoneName { + pvc.Annotations[storageNodeAnnotation] = node.Name + } + } + } + pvc.Namespace = restoreNamespace + } else { + pvc.Name = bkpvInfo.PersistentVolumeClaim + pvc.Namespace = restoreNamespace + } volumeInfo.PersistentVolumeClaim = bkpvInfo.PersistentVolumeClaim volumeInfo.PersistentVolumeClaimUID = bkpvInfo.PersistentVolumeClaimUID volumeInfo.SourceNamespace = bkpvInfo.Namespace @@ -722,7 +729,6 @@ func (k *kdmp) StartRestore( labels[restoreObjectUIDKey] = utils.GetValidLabel(restore.Annotations[utils.PxbackupObjectUIDKey]) } } - volBackup := &kdmpapi.VolumeBackup{} volBackup.Labels = labels volBackup.Annotations = make(map[string]string) @@ -742,8 +748,6 @@ func (k *kdmp) StartRestore( return nil, err } - pvc.Namespace = restoreNamespace - backup, err := storkops.Instance().GetApplicationBackup(restore.Spec.BackupName, restore.Namespace) if err != nil { return nil, fmt.Errorf("unable to get applicationbackup cr %s/%s: %v", restore.Namespace, restore.Spec.BackupName, err) @@ -789,6 +793,7 @@ func (k *kdmp) StartRestore( Namespace: restoreNamespace, APIVersion: "v1", } + logrus.Tracef("%s de cr name [%v/%v]", funct, dataExport.Namespace, dataExport.Name) if _, err := kdmpShedOps.Instance().CreateDataExport(dataExport); err != nil { logrus.Errorf("failed to create DataExport CR: %v", err) return volumeInfos, err @@ -848,7 +853,6 @@ func (k *kdmp) GetRestoreStatus(restore *storkapi.ApplicationRestore) ([]*storka volumeInfos = append(volumeInfos, vInfo) continue } - if dataExport.Status.TransferID == "" { vInfo.Status = storkapi.ApplicationRestoreStatusInitial vInfo.Reason = "Volume restore not started yet" diff --git a/drivers/volume/portworx/portworx.go b/drivers/volume/portworx/portworx.go index baacad5e17..93ad6a1a38 100644 --- a/drivers/volume/portworx/portworx.go +++ b/drivers/volume/portworx/portworx.go @@ -678,7 +678,6 @@ func (p *portworx) GetClusterID() (string, error) { func (p *portworx) OwnsPVCForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string) bool { if cmBackupType == storkapi.ApplicationBackupGeneric { - // If user has forced the backupType in config map, default to generic always return false } return p.IsSupportedPVC(coreOps, pvc, true) diff --git a/pkg/applicationmanager/controllers/applicationrestore.go b/pkg/applicationmanager/controllers/applicationrestore.go index b2e1ffad7a..e5b7ba97d8 100644 --- a/pkg/applicationmanager/controllers/applicationrestore.go +++ b/pkg/applicationmanager/controllers/applicationrestore.go @@ -412,6 +412,7 @@ func (a *ApplicationRestoreController) updateRestoreCRInVolumeStage( if volumeInfos != nil { restore.Status.Volumes = append(restore.Status.Volumes, volumeInfos...) 
} + err = a.client.Update(context.TODO(), restore) if err != nil { time.Sleep(retrySleep) @@ -426,7 +427,59 @@ func (a *ApplicationRestoreController) updateRestoreCRInVolumeStage( return restore, nil } +func convertResourceVolInfoToAppBkpVolInfo( + volInfo []*kdmpapi.ResourceBackupVolumeInfo, +) (resVolInfo []*storkapi.ApplicationBackupVolumeInfo) { + restoreVolumeInfos := make([]*storkapi.ApplicationBackupVolumeInfo, 0) + for _, vol := range volInfo { + restoreVolInfo := &storkapi.ApplicationBackupVolumeInfo{} + restoreVolInfo.PersistentVolumeClaim = vol.PersistentVolumeClaim + restoreVolInfo.PersistentVolumeClaimUID = vol.PersistentVolumeClaimUID + restoreVolInfo.Namespace = vol.Namespace + restoreVolInfo.Volume = vol.Volume + restoreVolInfo.BackupID = vol.BackupID + restoreVolInfo.DriverName = vol.DriverName + restoreVolInfo.Status = storkapi.ApplicationBackupStatusType(vol.Status) + restoreVolInfo.Zones = vol.Zones + restoreVolInfo.Reason = vol.Reason + restoreVolInfo.Options = vol.Options + restoreVolInfo.TotalSize = vol.TotalSize + restoreVolInfo.ActualSize = vol.ActualSize + restoreVolInfo.StorageClass = vol.StorageClass + restoreVolInfo.Provisioner = vol.Provisioner + restoreVolInfo.VolumeSnapshot = vol.VolumeSnapshot + restoreVolumeInfos = append(restoreVolumeInfos, restoreVolInfo) + } + + return restoreVolumeInfos +} + +func convertResourceVolInfoToAppRestoreVolInfo( + volInfo []*kdmpapi.ResourceRestoreVolumeInfo, +) (resVolInfo []*storkapi.ApplicationRestoreVolumeInfo) { + restoreVolumeInfos := make([]*storkapi.ApplicationRestoreVolumeInfo, 0) + for _, vol := range volInfo { + restoreVolInfo := &storkapi.ApplicationRestoreVolumeInfo{} + restoreVolInfo.PersistentVolumeClaim = vol.PersistentVolumeClaim + restoreVolInfo.PersistentVolumeClaimUID = vol.PersistentVolumeClaimUID + restoreVolInfo.DriverName = vol.DriverName + restoreVolInfo.Status = storkapi.ApplicationRestoreStatusType(vol.Status) + restoreVolInfo.Zones = vol.Zones + restoreVolInfo.Reason = vol.Reason + restoreVolInfo.Options = vol.Options + restoreVolInfo.TotalSize = vol.TotalSize + restoreVolInfo.SourceVolume = vol.SourceVolume + restoreVolInfo.SourceNamespace = vol.SourceNamespace + restoreVolInfo.RestoreVolume = vol.RestoreVolume + + restoreVolumeInfos = append(restoreVolumeInfos, restoreVolInfo) + } + + return restoreVolumeInfos +} + func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.ApplicationRestore) error { + funct := "restoreVolumes" restore.Status.Stage = storkapi.ApplicationRestoreStageVolumes backup, err := storkops.Instance().GetApplicationBackup(restore.Spec.BackupName, restore.Namespace) @@ -492,93 +545,207 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat namespacedName.Namespace = restore.Namespace namespacedName.Name = restore.Name restoreCompleteList := make([]*storkapi.ApplicationRestoreVolumeInfo, 0) + nfs, err := IsNFSBackuplocationType(backup) + if err != nil { + logrus.Errorf("error in checking backuplocation type") + } if len(restore.Status.Volumes) != pvcCount { + // Here backupVolumeInfoMappings is framed based on driver name mapping, hence startRestore() + // gets called once per driver + var sErr error for driverName, vInfos := range backupVolumeInfoMappings { + restoreVolumeInfos := make([]*storkapi.ApplicationRestoreVolumeInfo, 0) backupVolInfos := vInfos - existingRestoreVolInfos := make([]*storkapi.ApplicationRestoreVolumeInfo, 0) driver, err := volume.Get(driverName) - if err != nil { - return err - } - - // For each driver, check 
if it needs any additional resources to be - // restored before starting the volume restore - objects, err := a.downloadResources(backup, restore.Spec.BackupLocation, restore.Namespace) - if err != nil { - log.ApplicationRestoreLog(restore).Errorf("Error downloading resources: %v", err) - return err - } - - // Skip pv/pvc if replacepolicy is set to retain to avoid creating - if restore.Spec.ReplacePolicy == storkapi.ApplicationRestoreReplacePolicyRetain { - backupVolInfos, existingRestoreVolInfos, err = a.skipVolumesFromRestoreList(restore, objects, driver, vInfos) + // BL NFS + kdmp = nfs code path + // s3 + kdmp = legacy code path + // BL NFS + EBS/GKE/Azure = legacy code path + // s3 + EBS/GKE/Azure = legacy code path + if !nfs || (nfs && driverName != volume.KDMPDriverName) { + existingRestoreVolInfos := make([]*storkapi.ApplicationRestoreVolumeInfo, 0) + //driver, err := volume.Get(driverName) if err != nil { - log.ApplicationRestoreLog(restore).Errorf("Error while checking pvcs: %v", err) return err } - } - - preRestoreObjects, err := driver.GetPreRestoreResources(backup, restore, objects) - if err != nil { - log.ApplicationRestoreLog(restore).Errorf("Error getting PreRestore Resources: %v", err) - return err - } - - // pvc creation is not part of kdmp - if driverName != "kdmp" { - if err := a.applyResources(restore, preRestoreObjects); err != nil { + // For each driver, check if it needs any additional resources to be + // restored before starting the volume restore + objects, err := a.downloadResources(backup, restore.Spec.BackupLocation, restore.Namespace) + if err != nil { + log.ApplicationRestoreLog(restore).Errorf("Error downloading resources: %v", err) return err } - } - - // Pre-delete resources for CSI driver - if (driverName == "csi" || driverName == "kdmp") && restore.Spec.ReplacePolicy == storkapi.ApplicationRestoreReplacePolicyDelete { - objectMap := storkapi.CreateObjectsMap(restore.Spec.IncludeResources) - objectBasedOnIncludeResources := make([]runtime.Unstructured, 0) - for _, o := range objects { - skip, err := a.resourceCollector.PrepareResourceForApply( - o, - objects, - objectMap, - restore.Spec.NamespaceMapping, - nil, // no need to set storage class mappings at this stage - nil, - restore.Spec.IncludeOptionalResourceTypes, - nil, - ) + // Skip pv/pvc if replacepolicy is set to retain to avoid creating + if restore.Spec.ReplacePolicy == storkapi.ApplicationRestoreReplacePolicyRetain { + backupVolInfos, existingRestoreVolInfos, err = a.skipVolumesFromRestoreList(restore, objects, driver, vInfos) if err != nil { + log.ApplicationRestoreLog(restore).Errorf("Error while checking pvcs: %v", err) return err } - if !skip { - objectBasedOnIncludeResources = append( - objectBasedOnIncludeResources, + } + preRestoreObjects, err := driver.GetPreRestoreResources(backup, restore, objects) + if err != nil { + log.ApplicationRestoreLog(restore).Errorf("Error getting PreRestore Resources: %v", err) + return err + } + + // Pre-delete resources for CSI driver + if (driverName == "csi" || driverName == volume.KDMPDriverName) && restore.Spec.ReplacePolicy == storkapi.ApplicationRestoreReplacePolicyDelete { + objectMap := storkapi.CreateObjectsMap(restore.Spec.IncludeResources) + objectBasedOnIncludeResources := make([]runtime.Unstructured, 0) + for _, o := range objects { + skip, err := a.resourceCollector.PrepareResourceForApply( o, + objects, + objectMap, + restore.Spec.NamespaceMapping, + nil, // no need to set storage class mappings at this stage + nil, + 
restore.Spec.IncludeOptionalResourceTypes, + nil, ) + if err != nil { + return err + } + if !skip { + objectBasedOnIncludeResources = append( + objectBasedOnIncludeResources, + o, + ) + } + } + tempObjects, err := a.getNamespacedObjectsToDelete( + restore, + objectBasedOnIncludeResources, + ) + if err != nil { + return err + } + err = a.resourceCollector.DeleteResources( + a.dynamicInterface, + tempObjects) + if err != nil { + return err } } - tempObjects, err := a.getNamespacedObjectsToDelete( - restore, - objectBasedOnIncludeResources, - ) + // pvc creation is not part of kdmp + if driverName != volume.KDMPDriverName { + if err := a.applyResources(restore, preRestoreObjects); err != nil { + return err + } + } + restoreCompleteList = append(restoreCompleteList, existingRestoreVolInfos...) + restoreVolumeInfos, sErr = driver.StartRestore(restore, backupVolInfos, preRestoreObjects) if err != nil { return err } - err = a.resourceCollector.DeleteResources( - a.dynamicInterface, - tempObjects) + } + // Check whether ResourceExport is preset or not + if nfs && driverName == volume.KDMPDriverName { + err = a.client.Update(context.TODO(), restore) if err != nil { - return err + time.Sleep(retrySleep) + return nil + } + crName := getResourceExportCRName(utils.PrefixNFSVolRestore, string(restore.UID), restore.Namespace) + resourceExport, err := kdmpShedOps.Instance().GetResourceExport(crName, restore.Namespace) + if err != nil { + if k8s_errors.IsNotFound(err) { + // create resource export CR + resourceExport = &kdmpapi.ResourceExport{} + // Adding required label for debugging + labels := make(map[string]string) + labels[utils.ApplicationRestoreCRNameKey] = utils.GetValidLabel(restore.Name) + labels[utils.ApplicationRestoreCRUIDKey] = utils.GetValidLabel(utils.GetShortUID(string(restore.UID))) + // If restore from px-backup, update the restore object details in the label + if val, ok := backup.Annotations[utils.PxbackupAnnotationCreateByKey]; ok { + if val == utils.PxbackupAnnotationCreateByValue { + labels[utils.RestoreObjectNameKey] = utils.GetValidLabel(backup.Annotations[utils.PxbackupObjectNameKey]) + labels[utils.RestoreObjectUIDKey] = utils.GetValidLabel(backup.Annotations[utils.PxbackupObjectUIDKey]) + } + } + resourceExport.Labels = labels + resourceExport.Annotations = make(map[string]string) + resourceExport.Annotations[utils.SkipResourceAnnotation] = "true" + resourceExport.Name = crName + resourceExport.Namespace = restore.Namespace + resourceExport.Spec.Type = kdmpapi.ResourceExportBackup + // TODO: In the restore path we need to change source and destination ref as it is confusing now + // Usually dest means where it's backed up or restore to + source := &kdmpapi.ResourceExportObjectReference{ + APIVersion: restore.APIVersion, + Kind: restore.Kind, + Namespace: restore.Namespace, + Name: restore.Name, + } + backupLocation, err := storkops.Instance().GetBackupLocation(backup.Spec.BackupLocation, backup.Namespace) + if err != nil { + return fmt.Errorf("error getting backup location path: %v", err) + } + destination := &kdmpapi.ResourceExportObjectReference{ + // TODO: GetBackupLocation is not returning APIVersion and kind. + // Hardcoding for now. 
+ // APIVersion: backupLocation.APIVersion, + // Kind: backupLocation.Kind, + APIVersion: "stork.libopenstorage.org/v1alpha1", + Kind: "BackupLocation", + Namespace: backupLocation.Namespace, + Name: backupLocation.Name, + } + resourceExport.Spec.Source = *source + resourceExport.Spec.Destination = *destination + _, err = kdmpShedOps.Instance().CreateResourceExport(resourceExport) + if err != nil { + logrus.Errorf("failed to create resourceExport CR %v: %v", crName, err) + return err + } + return nil + } + logrus.Errorf("%s error reading resourceExport CR %v: %v", funct, crName, err) + return nil + } else { + var message string + logrus.Infof("%s re cr %v status %v", funct, crName, resourceExport.Status.Status) + switch resourceExport.Status.Status { + case kdmpapi.ResourceExportStatusFailed: + message = fmt.Sprintf("%s Error creating CR %v for pvc creation: %v", funct, crName, err) + restore.Status.Status = storkapi.ApplicationRestoreStatusFailed + restore.Status.Stage = storkapi.ApplicationRestoreStageFinal + restore.Status.Reason = message + restore.Status.LastUpdateTimestamp = metav1.Now() + err = a.client.Update(context.TODO(), restore) + if err != nil { + return err + } + a.recorder.Event(restore, + v1.EventTypeWarning, + string(storkapi.ApplicationRestoreStatusFailed), + message) + log.ApplicationRestoreLog(restore).Errorf(message) + return err + case kdmpapi.ResourceExportStatusInitial: + return nil + case kdmpapi.ResourceExportStatusPending: + return nil + case kdmpapi.ResourceExportStatusInProgress: + return nil + case kdmpapi.ResourceExportStatusSuccessful: + backupVolInfos := convertResourceVolInfoToAppBkpVolInfo(resourceExport.VolumesInfo) + existingRestoreVolInfos := convertResourceVolInfoToAppRestoreVolInfo(resourceExport.ExistingVolumesInfo) + restoreCompleteList = append(restoreCompleteList, existingRestoreVolInfos...) + restoreVolumeInfos, sErr = driver.StartRestore(restore, backupVolInfos, nil) + default: + logrus.Infof("%s still valid re CR[%v]stage not available", funct, crName) + return nil + } } } - - restoreCompleteList = append(restoreCompleteList, existingRestoreVolInfos...) - restoreVolumeInfos, err := driver.StartRestore(restore, backupVolInfos, preRestoreObjects) - if err != nil { - message := fmt.Sprintf("Error starting Application Restore for volumes: %v", err) + if sErr != nil { + logrus.Infof("%s sErr: %v", funct, sErr) + message := fmt.Sprintf("Error starting Application Restore for volumes: %v", sErr) log.ApplicationRestoreLog(restore).Errorf(message) - if _, ok := err.(*volume.ErrStorageProviderBusy); ok { + if _, ok := sErr.(*volume.ErrStorageProviderBusy); ok { msg := fmt.Sprintf("Volume restores are in progress. Restores are failing for some volumes"+ - " since the storage provider is busy. Restore will be retried. Error: %v", err) + " since the storage provider is busy. Restore will be retried. Error: %v", sErr) a.recorder.Event(restore, v1.EventTypeWarning, string(storkapi.ApplicationRestoreStatusInProgress), @@ -606,6 +773,7 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat return err } restoreCompleteList = append(restoreCompleteList, restoreVolumeInfos...) 
+ logrus.Tracef("restoreCompleteList %+v", restoreCompleteList) } restore, err = a.updateRestoreCRInVolumeStage( namespacedName, @@ -618,6 +786,7 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat return err } } + inProgress := false // Skip checking status if no volumes are being restored if len(restore.Status.Volumes) != 0 { @@ -662,6 +831,7 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat restore.Status.FinishTimestamp = metav1.Now() restore.Status.Status = storkapi.ApplicationRestoreStatusFailed restore.Status.Reason = vInfo.Reason + break } else if vInfo.Status == storkapi.ApplicationRestoreStatusSuccessful { a.recorder.Event(restore, @@ -671,7 +841,6 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat } } } - // Return if we have any volume restores still in progress if inProgress || len(restore.Status.Volumes) != pvcCount { return nil @@ -694,7 +863,6 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat return err } } - restore.Status.LastUpdateTimestamp = metav1.Now() // Only on success compute the total restore size for _, vInfo := range restore.Status.Volumes { @@ -1271,10 +1439,10 @@ func (a *ApplicationRestoreController) restoreResources( log.ApplicationRestoreLog(restore).Errorf("Error getting backup: %v", err) return err } - nfs, err := IsNFSBackuplocationType(backup) if err != nil { logrus.Errorf("error in checking backuplocation type") + return err } doCleanup := true @@ -1289,7 +1457,7 @@ func (a *ApplicationRestoreController) restoreResources( return err } } else { - // Check whether ResourceExport is preset or not + // Check whether ResourceExport is present or not crName := getResourceExportCRName(utils.PrefixRestore, string(restore.UID), restore.Namespace) resourceExport, err := kdmpShedOps.Instance().GetResourceExport(crName, a.restoreAdminNamespace) if err != nil { @@ -1552,7 +1720,14 @@ func (a *ApplicationRestoreController) cleanupResources(restore *storkapi.Applic crName := getResourceExportCRName(utils.PrefixRestore, string(restore.UID), restore.Namespace) err := kdmpShedOps.Instance().DeleteResourceExport(crName, a.restoreAdminNamespace) if err != nil && !k8s_errors.IsNotFound(err) { - errMsg := fmt.Sprintf("failed to delete data export CR [%v]: %v", crName, err) + errMsg := fmt.Sprintf("failed to delete resource export CR [%v]: %v", crName, err) + log.ApplicationRestoreLog(restore).Errorf("%v", errMsg) + return err + } + crName = getResourceExportCRName(utils.PrefixNFSVolRestore, string(restore.UID), restore.Namespace) + err = kdmpShedOps.Instance().DeleteResourceExport(crName, restore.Namespace) + if err != nil && !k8s_errors.IsNotFound(err) { + errMsg := fmt.Sprintf("failed to delete resource export CR [%v]: %v", crName, err) log.ApplicationRestoreLog(restore).Errorf("%v", errMsg) return err } diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 90a862791b..115a40c110 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -26,6 +26,8 @@ const ( PrefixBackup = "backup" // PrefixRestore prefix string that will be used for the kdmp restore job PrefixRestore = "restore" + // PrefixNFSVolRestore prefix string that will be used for nfs vol restore + PrefixNFSVolRestore = "nfs-vol-restore" // KdmpAnnotationPrefix - KDMP annotation prefix KdmpAnnotationPrefix = "kdmp.portworx.com/" From ee5959d56447f8a7e4abc71d65e2b47290281d90 Mon Sep 17 00:00:00 2001 From: diptiranjanpx Date: Thu, 3 Nov 2022 16:37:13 +0000 Subject: [PATCH 
70/97] PB-3211: Mapping nfs restore statuses to stork restore statuses and update. --- .../controllers/applicationrestore.go | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/pkg/applicationmanager/controllers/applicationrestore.go b/pkg/applicationmanager/controllers/applicationrestore.go index e5b7ba97d8..6c3826a00e 100644 --- a/pkg/applicationmanager/controllers/applicationrestore.go +++ b/pkg/applicationmanager/controllers/applicationrestore.go @@ -1087,9 +1087,20 @@ func (a *ApplicationRestoreController) updateResourceStatus( func (a *ApplicationRestoreController) updateResourceStatusFromRestoreCR( restore *storkapi.ApplicationRestore, resource *kdmpapi.ResourceRestoreResourceInfo, - status storkapi.ApplicationRestoreStatusType, + status kdmpapi.ResourceRestoreStatus, reason string, ) { + var resourceStatus storkapi.ApplicationRestoreStatusType + switch status { + case kdmpapi.ResourceRestoreStatusSuccessful: + resourceStatus = storkapi.ApplicationRestoreStatusSuccessful + case kdmpapi.ResourceRestoreStatusRetained: + resourceStatus = storkapi.ApplicationRestoreStatusRetained + case kdmpapi.ResourceRestoreStatusFailed: + resourceStatus = storkapi.ApplicationRestoreStatusFailed + case kdmpapi.ResourceRestoreStatusInProgress: + resourceStatus = storkapi.ApplicationRestoreStatusInProgress + } updatedResource := &storkapi.ApplicationRestoreResourceInfo{ ObjectInfo: storkapi.ObjectInfo{ Name: resource.Name, @@ -1100,6 +1111,8 @@ func (a *ApplicationRestoreController) updateResourceStatusFromRestoreCR( Kind: resource.Kind, }, }, + Status: resourceStatus, + Reason: reason, } restore.Status.Resources = append(restore.Status.Resources, updatedResource) } @@ -1540,7 +1553,7 @@ func (a *ApplicationRestoreController) restoreResources( a.updateResourceStatusFromRestoreCR( restore, resource, - storkapi.ApplicationRestoreStatusType(resource.Status), + resource.Status, resource.Reason) } restore.Status.Stage = storkapi.ApplicationRestoreStageFinal From fb72129986bc34ed59c033fea61cbd30c8b31eda Mon Sep 17 00:00:00 2001 From: Prashanth Kumar Date: Fri, 4 Nov 2022 11:25:17 +0000 Subject: [PATCH 71/97] Populating NFS backup size, appended nfs to job names --- pkg/applicationmanager/controllers/applicationbackup.go | 4 ++++ pkg/applicationmanager/controllers/applicationrestore.go | 4 ++-- pkg/utils/utils.go | 6 +++--- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/pkg/applicationmanager/controllers/applicationbackup.go b/pkg/applicationmanager/controllers/applicationbackup.go index 977c6d5ce3..b66e70bc29 100644 --- a/pkg/applicationmanager/controllers/applicationbackup.go +++ b/pkg/applicationmanager/controllers/applicationbackup.go @@ -1427,6 +1427,10 @@ func (a *ApplicationBackupController) backupResources( backup.Status.FinishTimestamp = metav1.Now() backup.Status.Status = stork_api.ApplicationBackupStatusSuccessful backup.Status.Reason = "Volumes and resources were backed up successfully" + // Only on success compute the total backup size + for _, vInfo := range backup.Status.Volumes { + backup.Status.TotalSize += vInfo.TotalSize + } case kdmpapi.ResourceExportStatusInitial: case kdmpapi.ResourceExportStatusPending: case kdmpapi.ResourceExportStatusInProgress: diff --git a/pkg/applicationmanager/controllers/applicationrestore.go b/pkg/applicationmanager/controllers/applicationrestore.go index 6c3826a00e..ca73f400f3 100644 --- a/pkg/applicationmanager/controllers/applicationrestore.go +++ b/pkg/applicationmanager/controllers/applicationrestore.go @@ 
-646,7 +646,7 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat time.Sleep(retrySleep) return nil } - crName := getResourceExportCRName(utils.PrefixNFSVolRestore, string(restore.UID), restore.Namespace) + crName := getResourceExportCRName(utils.PrefixNFSRestorePVC, string(restore.UID), restore.Namespace) resourceExport, err := kdmpShedOps.Instance().GetResourceExport(crName, restore.Namespace) if err != nil { if k8s_errors.IsNotFound(err) { @@ -1737,7 +1737,7 @@ func (a *ApplicationRestoreController) cleanupResources(restore *storkapi.Applic log.ApplicationRestoreLog(restore).Errorf("%v", errMsg) return err } - crName = getResourceExportCRName(utils.PrefixNFSVolRestore, string(restore.UID), restore.Namespace) + crName = getResourceExportCRName(utils.PrefixNFSRestorePVC, string(restore.UID), restore.Namespace) err = kdmpShedOps.Instance().DeleteResourceExport(crName, restore.Namespace) if err != nil && !k8s_errors.IsNotFound(err) { errMsg := fmt.Sprintf("failed to delete resource export CR [%v]: %v", crName, err) diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 115a40c110..c10603f6a8 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -25,9 +25,9 @@ const ( // PrefixBackup - prefix string that will be used for the kdmp backup job PrefixBackup = "backup" // PrefixRestore prefix string that will be used for the kdmp restore job - PrefixRestore = "restore" - // PrefixNFSVolRestore prefix string that will be used for nfs vol restore - PrefixNFSVolRestore = "nfs-vol-restore" + PrefixRestore = "nfs-restore-resource" + // PrefixNFSRestorePVC prefix string that will be used for pvc creation during nfs vol restore + PrefixNFSRestorePVC = "nfs-restore-pvc" // KdmpAnnotationPrefix - KDMP annotation prefix KdmpAnnotationPrefix = "kdmp.portworx.com/" From ef6c7ea7022d9d6e06c1c53a6bdce045726d5d7e Mon Sep 17 00:00:00 2001 From: sivakumar subraani Date: Sat, 5 Nov 2022 13:56:09 +0000 Subject: [PATCH 72/97] pb-3100: Added check to take kdmp backup if it is a PX volume on an NFS backuplocation.
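The gist of this change: OwnsPVCForBackup now also receives the backup location type, and the Portworx driver declines ownership when that type is NFS, so the PVC falls through the ordered driver list to kdmp. Below is a minimal sketch of the new decision, assuming the storkapi types vendored in this repo; ownsPVCForBackup and isSupportedPVC are hypothetical stand-ins for the real method and for the existing p.IsSupportedPVC(...) check:

    package main

    import (
        "fmt"

        storkapi "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1"
    )

    // ownsPVCForBackup mirrors the new portworx behaviour: an NFS backup
    // location is never owned here, so driver selection falls through the
    // ordered driver list and defaults to the kdmp (generic) driver.
    func ownsPVCForBackup(isSupportedPVC bool, blType storkapi.BackupLocationType) bool {
        if blType == storkapi.BackupLocationNFS {
            return false
        }
        return isSupportedPVC
    }

    func main() {
        // Even a supported PX volume is declined when the target is NFS.
        fmt.Println(ownsPVCForBackup(true, storkapi.BackupLocationNFS)) // false
    }
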
--- drivers/volume/aws/aws.go | 1 + drivers/volume/azure/azure.go | 1 + drivers/volume/csi/csi.go | 1 + drivers/volume/gcp/gcp.go | 1 + drivers/volume/kdmp/kdmp.go | 2 +- drivers/volume/linstor/linstor.go | 2 +- drivers/volume/mock/mock.go | 2 +- drivers/volume/portworx/portworx.go | 6 ++++-- drivers/volume/volume.go | 5 +++-- pkg/applicationmanager/controllers/applicationbackup.go | 7 +------ 10 files changed, 15 insertions(+), 13 deletions(-) diff --git a/drivers/volume/aws/aws.go b/drivers/volume/aws/aws.go index f762b887c9..78331013b0 100644 --- a/drivers/volume/aws/aws.go +++ b/drivers/volume/aws/aws.go @@ -116,6 +116,7 @@ func (a *aws) OwnsPVCForBackup( pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string, + blType storkapi.BackupLocationType, ) bool { if cmBackupType == storkapi.ApplicationBackupGeneric { // If user has forced the backupType in config map, default to generic always diff --git a/drivers/volume/azure/azure.go b/drivers/volume/azure/azure.go index 9ea17d2bc0..2d14041e7f 100644 --- a/drivers/volume/azure/azure.go +++ b/drivers/volume/azure/azure.go @@ -157,6 +157,7 @@ func (a *azure) OwnsPVCForBackup( pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string, + blType storkapi.BackupLocationType, ) bool { if cmBackupType == storkapi.ApplicationBackupGeneric { // If user has forced the backupType in config map, default to generic always diff --git a/drivers/volume/csi/csi.go b/drivers/volume/csi/csi.go index eb1da78882..5d6fbb0846 100644 --- a/drivers/volume/csi/csi.go +++ b/drivers/volume/csi/csi.go @@ -309,6 +309,7 @@ func (c *csi) OwnsPVCForBackup( pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string, + blType storkapi.BackupLocationType, ) bool { if cmBackupType == storkapi.ApplicationBackupGeneric || crBackupType == storkapi.ApplicationBackupGeneric { // If user has forced the backupType in config map or applicationbackup CR, default to generic always diff --git a/drivers/volume/gcp/gcp.go b/drivers/volume/gcp/gcp.go index 73db3f64d8..4101785599 100644 --- a/drivers/volume/gcp/gcp.go +++ b/drivers/volume/gcp/gcp.go @@ -98,6 +98,7 @@ func (g *gcp) OwnsPVCForBackup( pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string, + blType storkapi.BackupLocationType, ) bool { if cmBackupType == storkapi.ApplicationBackupGeneric { // If user has forced the backupType in config map, default to generic always diff --git a/drivers/volume/kdmp/kdmp.go b/drivers/volume/kdmp/kdmp.go index 76d250a3e7..d19143afc0 100644 --- a/drivers/volume/kdmp/kdmp.go +++ b/drivers/volume/kdmp/kdmp.go @@ -118,7 +118,7 @@ func (k *kdmp) Stop() error { return nil } -func (k *kdmp) OwnsPVCForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string) bool { +func (k *kdmp) OwnsPVCForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string, blType storkapi.BackupLocationType) bool { // KDMP can handle any PVC type. 
KDMP driver will always be a fallback // option when none of the other supported drivers by stork own the PVC return true diff --git a/drivers/volume/linstor/linstor.go b/drivers/volume/linstor/linstor.go index 66ad3ddbb3..af84182f52 100644 --- a/drivers/volume/linstor/linstor.go +++ b/drivers/volume/linstor/linstor.go @@ -328,7 +328,7 @@ func (l *linstor) GetVolumeClaimTemplates(templates []v1.PersistentVolumeClaim) return linstorTemplates, nil } -func (l *linstor) OwnsPVCForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string) bool { +func (l *linstor) OwnsPVCForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string, blType storkapi.BackupLocationType) bool { return l.OwnsPVC(coreOps, pvc) } diff --git a/drivers/volume/mock/mock.go b/drivers/volume/mock/mock.go index f43fbb3cbf..ed6252a43b 100644 --- a/drivers/volume/mock/mock.go +++ b/drivers/volume/mock/mock.go @@ -275,7 +275,7 @@ func (m Driver) GetPodVolumes(podSpec *v1.PodSpec, namespace string, includePend } // OwnsPVCForBackup returns true because it owns all PVCs created by tests -func (m *Driver) OwnsPVCForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string) bool { +func (m *Driver) OwnsPVCForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string, blType storkapi.BackupLocationType) bool { return m.OwnsPVC(coreOps, pvc) } diff --git a/drivers/volume/portworx/portworx.go b/drivers/volume/portworx/portworx.go index 93ad6a1a38..ccd71926ff 100644 --- a/drivers/volume/portworx/portworx.go +++ b/drivers/volume/portworx/portworx.go @@ -676,8 +676,10 @@ func (p *portworx) GetClusterID() (string, error) { return cluster.Id, nil } -func (p *portworx) OwnsPVCForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string) bool { - if cmBackupType == storkapi.ApplicationBackupGeneric { +func (p *portworx) OwnsPVCForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string, blType storkapi.BackupLocationType) bool { + // For a portworx volume whose backuplocation type is NFS, we will not own the PVC. + // It will default to kdmp + if blType == storkapi.BackupLocationNFS { return false } return p.IsSupportedPVC(coreOps, pvc, true) diff --git a/drivers/volume/volume.go b/drivers/volume/volume.go index f72c629cdb..9f6eac2111 100644 --- a/drivers/volume/volume.go +++ b/drivers/volume/volume.go @@ -120,7 +120,7 @@ type Driver interface { // OwnsPVCForBackup returns true if the PVC is owned by the driver // Since an extra check needs to be done for the backup case, a separate version of the API was added.
- OwnsPVCForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string) bool + OwnsPVCForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string, blType storkapi.BackupLocationType) bool // OwnsPV returns true if the PV is owned by the driver OwnsPV(pvc *v1.PersistentVolume) bool @@ -342,13 +342,14 @@ func GetPVCDriverForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string, + blType storkapi.BackupLocationType, ) (string, error) { for _, driverName := range orderedListOfDrivers { d, ok := volDrivers[driverName] if !ok { continue } - if d.OwnsPVCForBackup(coreOps, pvc, cmBackupType, crBackupType) { + if d.OwnsPVCForBackup(coreOps, pvc, cmBackupType, crBackupType, blType) { return driverName, nil } } diff --git a/pkg/applicationmanager/controllers/applicationbackup.go b/pkg/applicationmanager/controllers/applicationbackup.go index b66e70bc29..1af8769cbb 100644 --- a/pkg/applicationmanager/controllers/applicationbackup.go +++ b/pkg/applicationmanager/controllers/applicationbackup.go @@ -571,12 +571,7 @@ func (a *ApplicationBackupController) backupVolumes(backup *stork_api.Applicatio if err != nil { return err } - // Generic Backup type is forced for all backup taken on a NFS backuplocation. - // This change will make portworx volume also to follow kdmp path. - if backupLocation.Location.Type == stork_api.BackupLocationNFS { - driverType = stork_api.ApplicationBackupGeneric - } - driverName, err = volume.GetPVCDriverForBackup(core.Instance(), &pvc, driverType, backup.Spec.BackupType) + driverName, err = volume.GetPVCDriverForBackup(core.Instance(), &pvc, driverType, backup.Spec.BackupType, backupLocation.Location.Type) if err != nil { // Skip unsupported PVCs if _, ok := err.(*errors.ErrNotSupported); ok { From 692bbf642e50ee13891a3c94357399294ba3c5c1 Mon Sep 17 00:00:00 2001 From: sivakumar subraani Date: Sun, 6 Nov 2022 13:01:15 +0000 Subject: [PATCH 73/97] vendor kdmp from 1.2.3 branch and sched-ops from nfs-ea branch --- go.mod | 5 +- go.sum | 10 + .../controllers/resourceexport/reconcile.go | 430 ++++++++++++++++++ .../resourceexport/resourceexport.go | 134 ++++++ .../portworx/sched-ops/k8s/kdmp/kdmp.go | 4 +- .../sched-ops/k8s/kdmp/resourcebackup.go | 64 +++ .../sched-ops/k8s/kdmp/resourceexport.go | 64 +++ vendor/google.golang.org/grpc/CONTRIBUTING.md | 7 +- .../grpc/attributes/attributes.go | 4 +- .../grpc/balancer/balancer.go | 3 +- .../grpc/balancer/base/balancer.go | 4 + .../grpc_lb_v1/load_balancer_grpc.pb.go | 2 +- .../grpc/balancer/grpclb/grpclb.go | 4 +- .../grpc/balancer/grpclb/grpclb_config.go | 2 +- .../balancer/grpclb/grpclb_remote_balancer.go | 8 +- .../grpc/balancer_conn_wrappers.go | 318 +++++++++---- .../grpc/channelz/channelz.go | 36 ++ vendor/google.golang.org/grpc/clientconn.go | 354 +++++++------- .../internal/handshaker/service/service.go | 3 +- .../proto/grpc_gcp/handshaker_grpc.pb.go | 2 +- .../grpc/credentials/google/google.go | 5 +- .../grpc/credentials/google/xds.go | 54 ++- .../grpc/credentials/insecure/insecure.go | 31 +- vendor/google.golang.org/grpc/dialoptions.go | 67 ++- .../grpc/encoding/encoding.go | 2 +- vendor/google.golang.org/grpc/go.mod | 12 +- vendor/google.golang.org/grpc/go.sum | 30 +- .../grpc/grpclog/loggerv2.go | 8 +- vendor/google.golang.org/grpc/interceptor.go | 9 +- .../balancer/gracefulswitch/gracefulswitch.go | 384 ++++++++++++++++ .../grpc/internal/binarylog/binarylog.go | 93 ++-- 
.../grpc/internal/binarylog/env_config.go | 6 +- .../grpc/internal/binarylog/method_logger.go | 26 +- .../grpc/internal/channelz/funcs.go | 228 ++++++---- .../grpc/internal/channelz/id.go | 75 +++ .../grpc/internal/channelz/logging.go | 91 ++-- .../grpc/internal/channelz/types.go | 23 +- .../grpc/internal/envconfig/xds.go | 21 +- .../grpc/internal/googlecloud/googlecloud.go | 76 +--- .../grpc/internal/googlecloud/manufacturer.go | 26 ++ .../googlecloud/manufacturer_linux.go | 27 ++ .../googlecloud/manufacturer_windows.go | 50 ++ .../grpc/internal/grpclog/grpclog.go | 8 +- .../grpc/internal/grpcutil/regex.go | 11 +- .../grpc/internal/internal.go | 83 +++- .../grpc/internal/metadata/metadata.go | 46 ++ .../grpc/internal/pretty/pretty.go | 82 ++++ .../grpc/internal/transport/controlbuf.go | 6 + .../grpc/internal/transport/handler_server.go | 22 +- .../grpc/internal/transport/http2_client.go | 76 ++-- .../grpc/internal/transport/http2_server.go | 121 ++--- .../grpc/internal/transport/http_util.go | 5 - .../grpc/internal/transport/transport.go | 17 +- .../grpc/metadata/metadata.go | 8 +- .../google.golang.org/grpc/picker_wrapper.go | 8 +- vendor/google.golang.org/grpc/pickfirst.go | 126 +++-- .../reflection_grpc.pb.go | 2 +- .../grpc/reflection/serverreflection.go | 416 +++++------------ vendor/google.golang.org/grpc/regenerate.sh | 33 +- vendor/google.golang.org/grpc/resolver/map.go | 55 ++- .../grpc/resolver/resolver.go | 8 +- .../grpc/resolver_conn_wrapper.go | 23 +- vendor/google.golang.org/grpc/server.go | 146 +++--- .../google.golang.org/grpc/service_config.go | 5 +- vendor/google.golang.org/grpc/stream.go | 289 +++++++----- vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 2 +- vendor/modules.txt | 10 +- 68 files changed, 3050 insertions(+), 1362 deletions(-) create mode 100644 vendor/github.com/portworx/kdmp/pkg/controllers/resourceexport/reconcile.go create mode 100644 vendor/github.com/portworx/kdmp/pkg/controllers/resourceexport/resourceexport.go create mode 100644 vendor/github.com/portworx/sched-ops/k8s/kdmp/resourcebackup.go create mode 100644 vendor/github.com/portworx/sched-ops/k8s/kdmp/resourceexport.go create mode 100644 vendor/google.golang.org/grpc/channelz/channelz.go create mode 100644 vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/id.go create mode 100644 vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go create mode 100644 vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go create mode 100644 vendor/google.golang.org/grpc/internal/pretty/pretty.go diff --git a/go.mod b/go.mod index 726d357679..d0b18e694d 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,7 @@ require ( gocloud.dev v0.20.0 golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a google.golang.org/api v0.30.0 - google.golang.org/grpc v1.43.0 + google.golang.org/grpc v1.48.0 google.golang.org/grpc/examples v0.0.0-20220715232852-f601dfac73c9 // indirect gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 // indirect @@ -77,8 +77,7 @@ replace ( github.com/kubernetes-incubator/external-storage v0.20.4-openstorage-rc10 => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc10 github.com/libopenstorage/autopilot-api => github.com/libopenstorage/autopilot-api v0.6.1-0.20210301232050-ca2633c6e114 github.com/libopenstorage/openstorage => 
github.com/libopenstorage/openstorage v1.0.1-0.20220707215604-afbea03c04c5 - //github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 - github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20220922150029-c1d35df2436a + github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 github.com/portworx/torpedo => github.com/portworx/torpedo v0.0.0-20220714042817-25f6ab6dc5d1 gopkg.in/fsnotify.v1 v1.4.7 => github.com/fsnotify/fsnotify v1.4.7 helm.sh/helm/v3 => helm.sh/helm/v3 v3.6.1 diff --git a/go.sum b/go.sum index 70aa859352..00c8724e25 100644 --- a/go.sum +++ b/go.sum @@ -338,6 +338,7 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= @@ -484,6 +485,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= @@ -1136,6 +1138,8 @@ github.com/libopenstorage/stork v1.4.1-0.20211103064004-088d8fdeaa37/go.mod h1:I github.com/libopenstorage/stork v1.4.1-0.20211113171730-e02f28e240e9/go.mod h1:NTt7xK9DqWpXLEBJI4WEz/XTUG3EkW0zcqyOMO5Xp2w= github.com/libopenstorage/stork v1.4.1-0.20220323180113-0ea773109d05/go.mod h1:h+tscSChqPpry+lUHJYFqC+Gk0JY/qi6eCkUJYBo0wQ= github.com/libopenstorage/stork v1.4.1-0.20220414104250-3c18fd21ed95/go.mod h1:yE94X0xBFSBQ9LvvJ/zppc4+XeiCAXtsHfYHm15dlcA= +github.com/libopenstorage/stork v1.4.1-0.20220902043617-635e642468d0/go.mod h1:oQ0lteROzRCxHMvESCSyOiY/9oqgO3Qrvfs5LI/jVCA= +github.com/libopenstorage/stork v1.4.1-0.20220902111346-9dbf76d2db2c/go.mod h1:KNG/pkhMCdKXXFr0nKtYybWCx2ggLCoi+I7Onylwl64= github.com/libopenstorage/stork v1.4.1-0.20221103082056-65abc8cc4e80/go.mod h1:yX+IlCrUsZekC6zxL6zHE7sBPKIudubHB3EcImzeRbI= github.com/libopenstorage/systemutils v0.0.0-20160208220149-44ac83be3ce1 h1:5vqfYYWm4b+lbkMtvvWtWBiqLbmLN6dNvWaa7wVsz/Q= github.com/libopenstorage/systemutils 
v0.0.0-20160208220149-44ac83be3ce1/go.mod h1:xwNGC7xiz/BQ/wbMkvHujL8Gjgseg+x41xMek7sKRRQ= @@ -1425,6 +1429,8 @@ github.com/portworx/kdmp v0.4.1-0.20220309093511-f7b925b9e53e/go.mod h1:RAXbeaO/ github.com/portworx/kdmp v0.4.1-0.20220414053457-962507678379/go.mod h1:EAVroITfYd50a0vi/ScAILl6h5RYJteuO/pg1y3vNNw= github.com/portworx/kdmp v0.4.1-0.20220710173715-5d42efc7d149 h1:KaRMV5hWbl7raiTFo20AZaXSIBBKCadzBmrXfwU+Id0= github.com/portworx/kdmp v0.4.1-0.20220710173715-5d42efc7d149/go.mod h1:nb5AupP/63ByyqAYfZ+E32LDEnP0PjgH6w+yKXxWIgE= +github.com/portworx/kdmp v0.4.1-0.20220902105026-dc14791e1508/go.mod h1:sTO9LkPkExEVE6BqowIzkrQsyBtGdaC4Vh1AcKQ4xZA= +github.com/portworx/kdmp v0.4.1-0.20220905153748-e0bb69e59f38/go.mod h1:NI2UgLITtggRvvaRA7lE4+Np4htDp+06Jf1LWksKyGA= github.com/portworx/kdmp v0.4.1-0.20221106080928-1fd01d5e6ed0 h1:orOtVtS8VcmKiorxN0E83QrTpUFiCQ5OMVOJaqhivOk= github.com/portworx/kdmp v0.4.1-0.20221106080928-1fd01d5e6ed0/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= github.com/portworx/kvdb v0.0.0-20190105022415-cccaa09abfc9/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= @@ -1441,6 +1447,8 @@ github.com/portworx/sched-ops v1.20.4-rc1.0.20220824221759-f21d3c3b4496 h1:4VuOz github.com/portworx/sched-ops v1.20.4-rc1.0.20220824221759-f21d3c3b4496/go.mod h1:/xDBMzUV30kbdQYaPdAFcAYqEada6ZnWi4zt4KzFzAI= github.com/portworx/sched-ops v1.20.4-rc1.0.20220922150029-c1d35df2436a h1:qzoPM67cqkX6qJKzd1Wmbt9hZkY5kFYlqnbZMfG8qU0= github.com/portworx/sched-ops v1.20.4-rc1.0.20220922150029-c1d35df2436a/go.mod h1:/xDBMzUV30kbdQYaPdAFcAYqEada6ZnWi4zt4KzFzAI= +github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 h1:fPdQkWEXZt+kE4o/wm6KlhwhYNDhJJpoRakcI4LcE48= +github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8/go.mod h1:8XBwmcbDuhW0TWFKCaHH4oS5xsfGFU5miSyqb0fvl3U= github.com/portworx/talisman v0.0.0-20210302012732-8af4564777f7/go.mod h1:e8a6uFpSbOlRpZQlW9aXYogC+GWAo065G0RL9hDkD4Q= github.com/portworx/torpedo v0.0.0-20220714042817-25f6ab6dc5d1 h1:P4Lo6jDUUKglz7rkqlK8Hg4gLXqIIrgQaEeWxcXrV8U= github.com/portworx/torpedo v0.0.0-20220714042817-25f6ab6dc5d1/go.mod h1:I2wJjwLvCub+L1eNHWyHIIe6SrCreMVgwym4dCsR1WE= @@ -2282,6 +2290,8 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/examples v0.0.0-20220715232852-f601dfac73c9 h1:YfILpEPJFqQb3n/IN6k0VAtsoEdNWB246xetOdhu7Kw= google.golang.org/grpc/examples v0.0.0-20220715232852-f601dfac73c9/go.mod h1:gxndsbNG1n4TZcHGgsYEfVGnTxqfEdfiDv6/DADXX9o= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= diff --git a/vendor/github.com/portworx/kdmp/pkg/controllers/resourceexport/reconcile.go b/vendor/github.com/portworx/kdmp/pkg/controllers/resourceexport/reconcile.go new file mode 100644 index 0000000000..71501b6ea7 --- /dev/null +++ b/vendor/github.com/portworx/kdmp/pkg/controllers/resourceexport/reconcile.go @@ -0,0 +1,430 @@ +package resourceexport + +import ( + "context" + "fmt" + "reflect" + + storkapi "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" + 
"github.com/libopenstorage/stork/pkg/controllers" + kdmpapi "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1" + kdmpcontroller "github.com/portworx/kdmp/pkg/controllers" + "github.com/portworx/kdmp/pkg/drivers" + "github.com/portworx/kdmp/pkg/drivers/driversinstance" + "github.com/portworx/kdmp/pkg/drivers/utils" + "github.com/portworx/sched-ops/k8s/batch" + "github.com/portworx/sched-ops/k8s/core" + "github.com/portworx/sched-ops/k8s/kdmp" + "github.com/portworx/sched-ops/task" + "github.com/sirupsen/logrus" + batchv1 "k8s.io/api/batch/v1" + k8sErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// updateResourceExportFields when an update needs to be done to ResourceExport +// user can choose which field to be updated and pass the same to updateStatus() +type updateResourceExportFields struct { + stage kdmpapi.ResourceExportStage + status kdmpapi.ResourceExportStatus + reason string + id string + resources []*kdmpapi.ResourceRestoreResourceInfo + VolumesInfo []*kdmpapi.ResourceBackupVolumeInfo + ExistingVolumesInfo []*kdmpapi.ResourceRestoreVolumeInfo +} + +func (c *Controller) process(ctx context.Context, in *kdmpapi.ResourceExport) (bool, error) { + funct := "re.process" + if in == nil { + return false, nil + } + resourceExport := in.DeepCopy() + if resourceExport.DeletionTimestamp != nil { + if controllers.ContainsFinalizer(resourceExport, kdmpcontroller.CleanupFinalizer) { + err := c.cleanupResources(resourceExport) + if err != nil { + return false, nil + } + } + if resourceExport.GetFinalizers() != nil { + controllers.RemoveFinalizer(resourceExport, kdmpcontroller.CleanupFinalizer) + err := c.client.Update(context.TODO(), resourceExport) + if err != nil { + errMsg := fmt.Sprintf("failed updating resourceExport CR %s: %v", resourceExport.Name, err) + logrus.Errorf("%v", errMsg) + return false, fmt.Errorf("%v", errMsg) + } + } + return true, nil + } + if resourceExport.Status.Stage == kdmpapi.ResourceExportStageFinal { + return true, nil + } + + // Set to initial status to start with + if resourceExport.Status.Status == "" { + updateData := updateResourceExportFields{ + stage: kdmpapi.ResourceExportStageInitial, + status: kdmpapi.ResourceExportStatusInitial, + reason: "", + } + return true, c.updateStatus(resourceExport, updateData) + } + // Get the driver type + opType, err := getDriverType(resourceExport) + if err != nil { + updateData := updateResourceExportFields{ + status: kdmpapi.ResourceExportStatusFailed, + reason: "fetching driver type failed", + } + return false, c.updateStatus(resourceExport, updateData) + } + + driver, err := driversinstance.Get(opType) + if err != nil { + updateData := updateResourceExportFields{ + status: kdmpapi.ResourceExportStatusFailed, + reason: "fetching driver instance failed", + } + return false, c.updateStatus(resourceExport, updateData) + } + blName := resourceExport.Spec.Destination.Name + blNamespace := resourceExport.Spec.Destination.Namespace + backupLocation, err := kdmpcontroller.ReadBackupLocation(blName, blNamespace, "") + + if err != nil { + msg := fmt.Sprintf("reading of backuplocation [%v/%v] failed: %v", blNamespace, blName, err) + logrus.Errorf(msg) + updateData := updateResourceExportFields{ + status: kdmpapi.ResourceExportStatusFailed, + reason: fmt.Sprintf("failed reading bl [%v/%v]: %v", blNamespace, blName, err), + } + return false, c.updateStatus(resourceExport, updateData) + } + + switch resourceExport.Status.Stage { + case 
kdmpapi.ResourceExportStageInitial: + // Create ResourceBackup CR + err = createResourceBackup(resourceExport.Name, resourceExport.Namespace) + if err != nil { + updateData := updateResourceExportFields{ + stage: kdmpapi.ResourceExportStageFinal, + status: kdmpapi.ResourceExportStatusFailed, + reason: fmt.Sprintf("failed to create ResourceBackup CR [%v/%v]", resourceExport.Namespace, resourceExport.Name), + } + return false, c.updateStatus(resourceExport, updateData) + } + // start data transfer + id, serr := startNfsResourceJob( + driver, + utils.KdmpConfigmapName, + utils.KdmpConfigmapNamespace, + resourceExport, + backupLocation, + ) + logrus.Tracef("%s: startNfsResourceJob id: %v", funct, id) + if serr != nil { + logrus.Errorf("%s: serr: %v", funct, serr) + updateData := updateResourceExportFields{ + stage: kdmpapi.ResourceExportStageFinal, + status: kdmpapi.ResourceExportStatusFailed, + reason: fmt.Sprintf("failed to start nfs resource job [%v/%v]", resourceExport.Namespace, resourceExport.Name), + } + return false, c.updateStatus(resourceExport, updateData) + } + updateData := updateResourceExportFields{ + stage: kdmpapi.ResourceExportStageInProgress, + status: kdmpapi.ResourceExportStatusInProgress, + id: id, + reason: "", + } + return false, c.updateStatus(resourceExport, updateData) + case kdmpapi.ResourceExportStageInProgress: + + // Read the job status and move the reconciler to the next state + progress, err := driver.JobStatus(resourceExport.Status.TransferID) + logrus.Tracef("%s job progress: %v", funct, progress) + if err != nil { + errMsg := fmt.Sprintf("failed to get %s job status: %s", resourceExport.Status.TransferID, err) + updateData := updateResourceExportFields{ + status: kdmpapi.ResourceExportStatusFailed, + reason: errMsg, + } + return false, c.updateStatus(resourceExport, updateData) + } + if progress.Status == batchv1.JobFailed { + updateData := updateResourceExportFields{ + stage: kdmpapi.ResourceExportStageFinal, + status: kdmpapi.ResourceExportStatusFailed, + reason: progress.Reason, + } + if len(progress.Reason) == 0 { + // As we couldn't get actual reason from executor + // marking it as internal error + updateData.reason = "internal error from executor" + return true, c.updateStatus(resourceExport, updateData) + } + return true, c.updateStatus(resourceExport, updateData) + } else if progress.Status == batchv1.JobConditionType("") { + updateData := updateResourceExportFields{ + stage: kdmpapi.ResourceExportStageInProgress, + status: kdmpapi.ResourceExportStatusInProgress, + reason: "RestoreExport job in progress", + } + return true, c.updateStatus(resourceExport, updateData) + } + + var rb *kdmpapi.ResourceBackup + // Get the resourcebackup + rb, err = kdmp.Instance().GetResourceBackup(resourceExport.Name, resourceExport.Namespace) + if err != nil { + errMsg := fmt.Sprintf("failed to get resourcebackup CR [%s/%s]: %s", resourceExport.Namespace, resourceExport.Name, err) + updateData := updateResourceExportFields{ + status: kdmpapi.ResourceExportStatusFailed, + reason: errMsg, + } + return false, c.updateStatus(resourceExport, updateData) + } + + switch progress.State { + case drivers.JobStateFailed: + errMsg := fmt.Sprintf("%s transfer job failed: %s", resourceExport.Status.TransferID, progress.Reason) + // If a job has failed it means it has tried all possible retries and given up.
+ // In such a scenario we need to fail the ResourceExport CR and move to the cleanup stage + updateData := updateResourceExportFields{ + stage: kdmpapi.ResourceExportStageFinal, + status: kdmpapi.ResourceExportStatusFailed, + reason: errMsg, + resources: rb.Status.Resources, + } + return true, c.updateStatus(resourceExport, updateData) + case drivers.JobStateCompleted: + // Go for clean up with success state + updateData := updateResourceExportFields{ + stage: kdmpapi.ResourceExportStageFinal, + status: kdmpapi.ResourceExportStatusSuccessful, + reason: "Job successful", + resources: rb.Status.Resources, + VolumesInfo: rb.VolumesInfo, + ExistingVolumesInfo: rb.ExistingVolumesInfo, + } + + return true, c.updateStatus(resourceExport, updateData) + } + case kdmpapi.ResourceExportStageFinal: + // Do nothing + } + + return true, nil +} + +func (c *Controller) cleanupResources(resourceExport *kdmpapi.ResourceExport) error { + // clean up resources + rbNamespace, rbName, err := utils.ParseJobID(resourceExport.Status.TransferID) + if err != nil { + errMsg := fmt.Sprintf("failed to parse job ID %v from ResourceExport CR: %v: %v", + resourceExport.Status.TransferID, resourceExport.Name, err) + logrus.Errorf("%v", errMsg) + return err + } + err = kdmp.Instance().DeleteResourceBackup(rbName, rbNamespace) + if err != nil && !k8sErrors.IsNotFound(err) { + errMsg := fmt.Sprintf("failed to delete ResourceBackup CR [%v/%v]: %v", rbNamespace, rbName, err) + logrus.Errorf("%v", errMsg) + return err + } + if err = batch.Instance().DeleteJob(resourceExport.Name, resourceExport.Namespace); err != nil && !k8sErrors.IsNotFound(err) { + return err + } + pvcName := utils.GetPvcNameForJob(rbName) + if err := core.Instance().DeletePersistentVolumeClaim(pvcName, rbNamespace); err != nil && !k8sErrors.IsNotFound(err) { + return fmt.Errorf("delete %s/%s pvc: %s", rbNamespace, pvcName, err) + } + pvName := utils.GetPvNameForJob(rbName) + if err := core.Instance().DeletePersistentVolume(pvName); err != nil && !k8sErrors.IsNotFound(err) { + return fmt.Errorf("delete %s pv: %s", pvName, err) + } + if err := utils.CleanServiceAccount(rbName, rbNamespace); err != nil { + errMsg := fmt.Sprintf("deletion of service account %s/%s failed: %v", rbNamespace, rbName, err) + logrus.Errorf("%s: %v", "cleanupResources", errMsg) + return fmt.Errorf(errMsg) + } + if err := core.Instance().DeleteSecret(utils.GetCredSecretName(rbName), rbNamespace); err != nil && !k8sErrors.IsNotFound(err) { + errMsg := fmt.Sprintf("deletion of backup credential secret %s failed: %v", rbName, err) + logrus.Errorf(errMsg) + return fmt.Errorf(errMsg) + } + return nil +} + +func (c *Controller) updateStatus(re *kdmpapi.ResourceExport, data updateResourceExportFields) error { + var updErr error + t := func() (interface{}, bool, error) { + logrus.Infof("updateStatus data: %+v", data) + namespacedName := types.NamespacedName{} + namespacedName.Name = re.Name + namespacedName.Namespace = re.Namespace + err := c.client.Get(context.TODO(), namespacedName, re) + if err != nil { + errMsg := fmt.Sprintf("failed to get RE CR %v/%v: %v", re.Namespace, re.Name, err) + logrus.Infof("%v", errMsg) + return "", true, fmt.Errorf("%v", errMsg) + } + + if data.status != "" { + re.Status.Status = data.status + re.Status.Reason = data.reason + } + + if data.id != "" { + re.Status.TransferID = data.id + } + + if data.stage != "" { + re.Status.Stage = data.stage + } + + if len(data.resources) != 0 { + re.Status.Resources = data.resources + } + if len(data.VolumesInfo) != 0 { + re.VolumesInfo =
data.VolumesInfo + } + + if len(data.ExistingVolumesInfo) != 0 { + re.ExistingVolumesInfo = data.ExistingVolumesInfo + } + + updErr = c.client.Update(context.TODO(), re) + if updErr != nil { + errMsg := fmt.Sprintf("failed updating resourceExport CR %s: %v", re.Name, updErr) + logrus.Errorf("%v", errMsg) + return "", true, fmt.Errorf("%v", errMsg) + } + return "", false, nil + } + if _, err := task.DoRetryWithTimeout(t, kdmpcontroller.TaskDefaultTimeout, kdmpcontroller.TaskProgressCheckInterval); err != nil { + errMsg := fmt.Sprintf("max retries done, failed updating resourceExport CR %s: %v", re.Name, updErr) + logrus.Errorf("%v", errMsg) + // Exhausted all retries, fail the CR + return fmt.Errorf("%v", errMsg) + } + + return nil + +} + +func getDriverType(re *kdmpapi.ResourceExport) (string, error) { + src := re.Spec.Source + doBackup := false + doRestore := false + + if isApplicationBackupRef(src) { + doBackup = true + } else if isApplicationRestoreRef(src) { + doRestore = true + } else { + return "", fmt.Errorf("invalid source kind for nfs resource export: expected ApplicationBackup or ApplicationRestore") + } + + switch re.Spec.Type { + case kdmpapi.ResourceExportNFS: + if doBackup { + return drivers.NFSBackup, nil + } + if doRestore { + return drivers.NFSRestore, nil + } + return "", fmt.Errorf("invalid kind for nfs source: expected nfs type") + } + return string(re.Spec.Type), nil +} + +func isApplicationBackupRef(ref kdmpapi.ResourceExportObjectReference) bool { + return ref.Kind == "ApplicationBackup" && ref.APIVersion == "stork.libopenstorage.org/v1alpha1" +} + +func isApplicationRestoreRef(ref kdmpapi.ResourceExportObjectReference) bool { + return ref.Kind == "ApplicationRestore" && ref.APIVersion == "stork.libopenstorage.org/v1alpha1" +} + +func startNfsResourceJob( + drv drivers.Interface, + jobConfigMap string, + jobConfigMapNs string, + re *kdmpapi.ResourceExport, + bl *storkapi.BackupLocation, +) (string, error) { + + err := utils.CreateNfsSecret(utils.GetCredSecretName(re.Name), bl, re.Namespace, nil) + if err != nil { + logrus.Errorf("failed to create NFS cred secret: %v", err) + return "", fmt.Errorf("failed to create NFS cred secret: %v", err) + } + switch drv.Name() { + case drivers.NFSBackup: + return drv.StartJob( + // TODO: below two calls need to be generalized and changed in all the startJob calls + // For NFS it needs to be populated in the ResourceExport CR and passed to the Job via its reconciler.
+ drivers.WithNfsImageExecutorSource(re.Spec.TriggeredFrom), + drivers.WithNfsImageExecutorSourceNs(re.Spec.TriggeredFromNs), + drivers.WithRestoreExport(re.Name), + drivers.WithJobNamespace(re.Namespace), + drivers.WithNfsServer(bl.Location.NfsConfig.ServerAddr), + drivers.WithNfsExportDir(bl.Location.Path), + drivers.WithAppCRName(re.Spec.Source.Name), + drivers.WithAppCRNamespace(re.Spec.Source.Namespace), + drivers.WithNamespace(re.Namespace), + drivers.WithResoureBackupName(re.Name), + drivers.WithResoureBackupNamespace(re.Namespace), + drivers.WithNfsMountOption(bl.Location.NfsConfig.MountOption), + ) + case drivers.NFSRestore: + return drv.StartJob( + drivers.WithNfsImageExecutorSource(re.Spec.TriggeredFrom), + drivers.WithNfsImageExecutorSourceNs(re.Spec.TriggeredFromNs), + drivers.WithRestoreExport(re.Name), + drivers.WithJobNamespace(re.Namespace), + drivers.WithNfsServer(bl.Location.NfsConfig.ServerAddr), + drivers.WithNfsExportDir(bl.Location.Path), + drivers.WithAppCRName(re.Spec.Source.Name), + drivers.WithAppCRNamespace(re.Spec.Source.Namespace), + drivers.WithNamespace(re.Namespace), + drivers.WithResoureBackupName(re.Name), + drivers.WithResoureBackupNamespace(re.Namespace), + drivers.WithNfsMountOption(bl.Location.NfsConfig.MountOption), + ) + } + return "", fmt.Errorf("unknown data transfer driver: %s", drv.Name()) +} + +func createResourceBackup(name, namespace string) error { + funct := "createResourceBackup" + + rbCR := &kdmpapi.ResourceBackup{ + TypeMeta: metav1.TypeMeta{ + Kind: reflect.TypeOf(kdmpapi.ResourceBackup{}).Name(), + APIVersion: "kdmp.portworx.com/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: map[string]string{ + utils.SkipResourceAnnotation: "true", + }, + }, + // TODO: As part of restore resources, prefill resources info + // so that job can update the same + Spec: kdmpapi.ResourceBackupSpec{}, + } + + _, err := kdmp.Instance().CreateResourceBackup(rbCR) + if err != nil { + logrus.Errorf("%s: %v", funct, err) + return err + } + + return nil +} diff --git a/vendor/github.com/portworx/kdmp/pkg/controllers/resourceexport/resourceexport.go b/vendor/github.com/portworx/kdmp/pkg/controllers/resourceexport/resourceexport.go new file mode 100644 index 0000000000..9021b452e1 --- /dev/null +++ b/vendor/github.com/portworx/kdmp/pkg/controllers/resourceexport/resourceexport.go @@ -0,0 +1,134 @@ +package resourceexport + +import ( + "context" + "reflect" + + "github.com/libopenstorage/stork/pkg/controllers" + kdmpapi "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1" + kdmpcontroller "github.com/portworx/kdmp/pkg/controllers" + "github.com/portworx/kdmp/pkg/utils" + "github.com/portworx/kdmp/pkg/version" + "github.com/portworx/sched-ops/k8s/apiextensions" + "github.com/portworx/sched-ops/k8s/kdmp" + "github.com/sirupsen/logrus" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + + "k8s.io/apimachinery/pkg/api/errors" + runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +// Controller is a k8s controller that handles ResourceExport resources. +type Controller struct { + client runtimeclient.Client +} + +// NewController returns a new instance of the controller. 
+func NewController(mgr manager.Manager) (*Controller, error) { + return &Controller{ + client: mgr.GetClient(), + }, nil +} + +// Init initializes the resource export controller +func (c *Controller) Init(mgr manager.Manager) error { + err := c.createCRD() + if err != nil { + return err + } + + // Create a new controller + ctrl, err := controller.New("resource-export-controller", mgr, controller.Options{ + Reconciler: c, + MaxConcurrentReconciles: 10, + }) + if err != nil { + return err + } + + // Watch for changes to primary resource + return ctrl.Watch(&source.Kind{Type: &kdmpapi.ResourceExport{}}, &handler.EnqueueRequestForObject{}) +} + +// Reconcile reads the state of the cluster for an object and makes changes based on the state read +// and what is in the Spec. +func (c *Controller) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + restoreExport, err := kdmp.Instance().GetResourceExport(request.Name, request.Namespace) + if err != nil { + if errors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. + // Return and don't requeue + return reconcile.Result{}, nil + } + // Error reading the object - requeue the request. + return reconcile.Result{RequeueAfter: kdmpcontroller.RequeuePeriod}, nil + } + if !controllers.ContainsFinalizer(restoreExport, kdmpcontroller.CleanupFinalizer) { + controllers.SetFinalizer(restoreExport, kdmpcontroller.CleanupFinalizer) + return reconcile.Result{Requeue: true}, c.client.Update(context.TODO(), restoreExport) + } + + requeue, err := c.process(context.TODO(), restoreExport) + if err != nil { + logrus.Errorf("failed to execute process function for ResourceExport CR %v: %v", restoreExport.Name, err) + return reconcile.Result{RequeueAfter: kdmpcontroller.RequeuePeriod}, nil + } + if requeue { + return reconcile.Result{RequeueAfter: kdmpcontroller.RequeuePeriod}, nil + } + + return reconcile.Result{RequeueAfter: kdmpcontroller.ResyncPeriod}, nil +} + +func (c *Controller) createCRD() error { + requiresV1, err := version.RequiresV1Registration() + if err != nil { + return err + } + resources := []apiextensions.CustomResource{ + { + Name: kdmpapi.ResourceExportResourceName, + Plural: kdmpapi.ResourceExportResourcePlural, + Group: kdmpapi.SchemeGroupVersion.Group, + Version: kdmpapi.SchemeGroupVersion.Version, + Scope: apiextensionsv1beta1.NamespaceScoped, + Kind: reflect.TypeOf(kdmpapi.ResourceExport{}).Name(), + }, + { + Name: kdmpapi.ResourceBackupResourceName, + Plural: kdmpapi.ResourceBackupResourcePlural, + Group: kdmpapi.SchemeGroupVersion.Group, + Version: kdmpapi.SchemeGroupVersion.Version, + Scope: apiextensionsv1beta1.NamespaceScoped, + Kind: reflect.TypeOf(kdmpapi.ResourceBackup{}).Name(), + }, + } + + for _, res := range resources { + if requiresV1 { + err := utils.CreateCRD(res) + if err != nil && !errors.IsAlreadyExists(err) { + return err + } + if err := apiextensions.Instance().ValidateCRD(res.Plural+"."+res.Group, kdmpcontroller.ValidateCRDTimeout, kdmpcontroller.ValidateCRDInterval); err != nil { + return err + } + } else { + err = apiextensions.Instance().CreateCRDV1beta1(res) + if err != nil && !errors.IsAlreadyExists(err) { + return err + } + if err := apiextensions.Instance().ValidateCRDV1beta1(res, kdmpcontroller.ValidateCRDTimeout, kdmpcontroller.ValidateCRDInterval); err != nil { + return err + } + } + } + + return nil +} diff --git
a/vendor/github.com/portworx/sched-ops/k8s/kdmp/kdmp.go b/vendor/github.com/portworx/sched-ops/k8s/kdmp/kdmp.go index b1a64268c2..d822497826 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/kdmp/kdmp.go +++ b/vendor/github.com/portworx/sched-ops/k8s/kdmp/kdmp.go @@ -26,7 +26,9 @@ type Ops interface { VolumeBackupOps VolumeBackupDeleteOps BackupLocationMaintenanceOps - + ResourceExportOps + ResourceBackupOps + // SetConfig sets the config and resets the client SetConfig(config *rest.Config) } diff --git a/vendor/github.com/portworx/sched-ops/k8s/kdmp/resourcebackup.go b/vendor/github.com/portworx/sched-ops/k8s/kdmp/resourcebackup.go new file mode 100644 index 0000000000..76d6b9ae1a --- /dev/null +++ b/vendor/github.com/portworx/sched-ops/k8s/kdmp/resourcebackup.go @@ -0,0 +1,64 @@ +package kdmp + +import ( + "context" + + kdmpv1alpha1 "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ResourceBackupOps is an interface to perform k8s ResourceBackup CR crud operations +type ResourceBackupOps interface { + // CreateResourceBackup creates the ResourceBackup CR + CreateResourceBackup(*kdmpv1alpha1.ResourceBackup) (*kdmpv1alpha1.ResourceBackup, error) + // GetResourceBackup gets the ResourceBackup CR + GetResourceBackup(string, string) (*kdmpv1alpha1.ResourceBackup, error) + // ListResourceBackup lists all the ResourceBackup CRs + ListResourceBackup(namespace string, filterOptions metav1.ListOptions) (*kdmpv1alpha1.ResourceBackupList, error) + // UpdateResourceBackup updates the ResourceBackup CR + UpdateResourceBackup(*kdmpv1alpha1.ResourceBackup) (*kdmpv1alpha1.ResourceBackup, error) + // DeleteResourceBackup deletes the ResourceBackup CR + DeleteResourceBackup(string, string) error +} + +// CreateResourceBackup creates the ResourceBackup CR +func (c *Client) CreateResourceBackup(backup *kdmpv1alpha1.ResourceBackup) (*kdmpv1alpha1.ResourceBackup, error) { + if err := c.initClient(); err != nil { + return nil, err + } + return c.kdmp.KdmpV1alpha1().ResourceBackups(backup.Namespace).Create(context.TODO(), backup, metav1.CreateOptions{}) +} + +// GetResourceBackup gets the ResourceBackup CR +func (c *Client) GetResourceBackup(name, namespace string) (*kdmpv1alpha1.ResourceBackup, error) { + if err := c.initClient(); err != nil { + return nil, err + } + return c.kdmp.KdmpV1alpha1().ResourceBackups(namespace).Get(context.TODO(), name, metav1.GetOptions{}) +} + +// ListResourceBackup lists all the ResourceBackup CRs +func (c *Client) ListResourceBackup(namespace string, filterOptions metav1.ListOptions) (*kdmpv1alpha1.ResourceBackupList, error) { + if err := c.initClient(); err != nil { + return nil, err + } + return c.kdmp.KdmpV1alpha1().ResourceBackups(namespace).List(context.TODO(), filterOptions) +} + +// DeleteResourceBackup deletes the ResourceBackup CR +func (c *Client) DeleteResourceBackup(name string, namespace string) error { + if err := c.initClient(); err != nil { + return err + } + return c.kdmp.KdmpV1alpha1().ResourceBackups(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{ + PropagationPolicy: &deleteForegroundPolicy, + }) +} + +// UpdateResourceBackup updates the ResourceBackup CR +func (c *Client) UpdateResourceBackup(backup *kdmpv1alpha1.ResourceBackup) (*kdmpv1alpha1.ResourceBackup, error) { + if err := c.initClient(); err != nil { + return nil, err + } + return c.kdmp.KdmpV1alpha1().ResourceBackups(backup.Namespace).Update(context.TODO(), backup, metav1.UpdateOptions{}) +} diff --git
a/vendor/github.com/portworx/sched-ops/k8s/kdmp/resourceexport.go b/vendor/github.com/portworx/sched-ops/k8s/kdmp/resourceexport.go new file mode 100644 index 0000000000..1a5610c0a3 --- /dev/null +++ b/vendor/github.com/portworx/sched-ops/k8s/kdmp/resourceexport.go @@ -0,0 +1,64 @@ +package kdmp + +import ( + "context" + + kdmpv1alpha1 "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ResourceExportOps is an interface to perform k8s ResourceExport CR crud operations +type ResourceExportOps interface { + // CreateResourceExport creates the ResourceExport CR + CreateResourceExport(*kdmpv1alpha1.ResourceExport) (*kdmpv1alpha1.ResourceExport, error) + // GetResourceExport gets the ResourceExport CR + GetResourceExport(string, string) (*kdmpv1alpha1.ResourceExport, error) + // ListResourceExport lists all the ResourceExport CRs + ListResourceExport(namespace string, filterOptions metav1.ListOptions) (*kdmpv1alpha1.ResourceExportList, error) + // UpdateResourceExport updates the ResourceExport CR + UpdateResourceExport(*kdmpv1alpha1.ResourceExport) (*kdmpv1alpha1.ResourceExport, error) + // DeleteResourceExport deletes the ResourceExport CR + DeleteResourceExport(string, string) error +} + +// CreateResourceExport creates the ResourceExport CR +func (c *Client) CreateResourceExport(export *kdmpv1alpha1.ResourceExport) (*kdmpv1alpha1.ResourceExport, error) { + if err := c.initClient(); err != nil { + return nil, err + } + return c.kdmp.KdmpV1alpha1().ResourceExports(export.Namespace).Create(context.TODO(), export, metav1.CreateOptions{}) +} + +// GetResourceExport gets the ResourceExport CR +func (c *Client) GetResourceExport(name, namespace string) (*kdmpv1alpha1.ResourceExport, error) { + if err := c.initClient(); err != nil { + return nil, err + } + return c.kdmp.KdmpV1alpha1().ResourceExports(namespace).Get(context.TODO(), name, metav1.GetOptions{}) +} + +// ListResourceExport lists all the ResourceExport CRs +func (c *Client) ListResourceExport(namespace string, filterOptions metav1.ListOptions) (*kdmpv1alpha1.ResourceExportList, error) { + if err := c.initClient(); err != nil { + return nil, err + } + return c.kdmp.KdmpV1alpha1().ResourceExports(namespace).List(context.TODO(), filterOptions) +} + +// DeleteResourceExport deletes the ResourceExport CR +func (c *Client) DeleteResourceExport(name string, namespace string) error { + if err := c.initClient(); err != nil { + return err + } + return c.kdmp.KdmpV1alpha1().ResourceExports(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{ + PropagationPolicy: &deleteForegroundPolicy, + }) +} + +// UpdateResourceExport updates the ResourceExport CR +func (c *Client) UpdateResourceExport(export *kdmpv1alpha1.ResourceExport) (*kdmpv1alpha1.ResourceExport, error) { + if err := c.initClient(); err != nil { + return nil, err + } + return c.kdmp.KdmpV1alpha1().ResourceExports(export.Namespace).Update(context.TODO(), export, metav1.UpdateOptions{}) +} diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index cd03f8c768..52338d004c 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -53,9 +53,8 @@ How to get your contributions merged smoothly and quickly. - **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on.
- - `make all` to test everything, OR - - `make vet` to catch vet errors - - `make test` to run the tests - - `make testrace` to run tests in race mode + - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode - Exceptions to the rules can be made if there's a compelling reason for doing so. diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 6ff2792ee4..ae13ddac14 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -69,7 +69,9 @@ func (a *Attributes) Value(key interface{}) interface{} { // bool' is implemented for a value in the attributes, it is called to // determine if the value matches the one stored in the other attributes. If // Equal is not implemented, standard equality is used to determine if the two -// values are equal. +// values are equal. Note that some types (e.g. maps) aren't comparable by +// default, so they must be wrapped in a struct, or in an alias type, with Equal +// defined. func (a *Attributes) Equal(o *Attributes) bool { if a == nil && o == nil { return true diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index bcc6f5451c..f7a7697cad 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -27,6 +27,7 @@ import ( "net" "strings" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" @@ -192,7 +193,7 @@ type BuildOptions struct { // server can ignore this field. Authority string // ChannelzParentID is the parent ClientConn's channelz ID. - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier // CustomUserAgent is the custom user agent set on the parent ClientConn. // The balancer should set the same custom user agent if it creates a // ClientConn. diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index a67074a3ad..e8dfc828aa 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -45,6 +45,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, + state: connectivity.Connecting, } // Initialize picker to a picker that always returns // ErrNoSubConnAvailable, because when state of a SubConn changes, we @@ -134,6 +135,9 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } + + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) return nil } diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index 50cc9da4a9..cb4b3c203c 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. 
DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: grpc/lb/v1/load_balancer.proto diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go index fe423af182..6c3402e36c 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -413,8 +413,8 @@ func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) { // this target is sent in the first message on the stream. if gc != nil { target := lb.dialTarget - if gc.TargetName != "" { - target = gc.TargetName + if gc.ServiceName != "" { + target = gc.ServiceName } if target != lb.target { lb.target = target diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go index b4e23dee01..8942c31310 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go @@ -34,7 +34,7 @@ const ( type grpclbServiceConfig struct { serviceconfig.LoadBalancingConfig ChildPolicy *[]map[string]json.RawMessage - TargetName string + ServiceName string } func (b *lbBuilder) ParseConfig(lbConfig json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go index 330df4baa2..dab1959418 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go @@ -33,8 +33,8 @@ import ( "google.golang.org/grpc/balancer" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/channelz" imetadata "google.golang.org/grpc/internal/metadata" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -228,7 +228,7 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { } else if bundle := lb.grpclbClientConnCreds; bundle != nil { dopts = append(dopts, grpc.WithCredentialsBundle(bundle)) } else { - dopts = append(dopts, grpc.WithInsecure()) + dopts = append(dopts, grpc.WithTransportCredentials(insecure.NewCredentials())) } if lb.opt.Dialer != nil { dopts = append(dopts, grpc.WithContextDialer(lb.opt.Dialer)) @@ -239,9 +239,7 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { // Explicitly set pickfirst as the balancer. dopts = append(dopts, grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"pick_first"}`)) dopts = append(dopts, grpc.WithResolvers(lb.manualResolver)) - if channelz.IsOn() { - dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) - } + dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) // Enable Keepalive for grpclb client. 
dopts = append(dopts, grpc.WithKeepaliveParams(keepalive.ClientParameters{ diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index f4ea617468..b1c23eaae0 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -20,130 +20,178 @@ package grpc import ( "fmt" + "strings" "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) -// scStateUpdate contains the subConn and the new state it changed to. -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State - err error -} +// ccBalancerWrapper sits between the ClientConn and the Balancer. +// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. The ClientConn is free to call these methods +// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn +// to the Balancer happen synchronously and in order. +// +// ccBalancerWrapper also implements the balancer.ClientConn interface and is +// passed to the Balancer implementations. It invokes unexported methods on the +// ClientConn to handle these calls from the Balancer. +// +// It uses the gracefulswitch.Balancer internally to ensure that balancer +// switches happen in a graceful manner. +type ccBalancerWrapper struct { + cc *ClientConn -// exitIdle contains no data and is just a signal sent on the updateCh in -// ccBalancerWrapper to instruct the balancer to exit idle. -type exitIdle struct{} + // Since these fields are accessed only from handleXxx() methods which are + // synchronized by the watcher goroutine, we do not need a mutex to protect + // these fields. + balancer *gracefulswitch.Balancer + curBalancerName string -// ccBalancerWrapper is a wrapper on top of cc for balancers. -// It implements balancer.ClientConn interface. -type ccBalancerWrapper struct { - cc *ClientConn - balancerMu sync.Mutex // synchronizes calls to the balancer - balancer balancer.Balancer - hasExitIdle bool - updateCh *buffer.Unbounded - closed *grpcsync.Event - done *grpcsync.Event - - mu sync.Mutex - subConns map[*acBalancerWrapper]struct{} + updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher(). + resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here. + closed *grpcsync.Event // Indicates if close has been called. + done *grpcsync.Event // Indicates if close has completed its work. } -func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { +// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer +// is not created until the switchTo() method is invoked. 
+func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { ccb := &ccBalancerWrapper{ cc: cc, updateCh: buffer.NewUnbounded(), + resultCh: buffer.NewUnbounded(), closed: grpcsync.NewEvent(), done: grpcsync.NewEvent(), - subConns: make(map[*acBalancerWrapper]struct{}), } go ccb.watcher() - ccb.balancer = b.Build(ccb, bopts) - _, ccb.hasExitIdle = ccb.balancer.(balancer.ExitIdler) + ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) return ccb } -// watcher balancer functions sequentially, so the balancer can be implemented -// lock-free. +// The following xxxUpdate structs wrap the arguments received as part of the +// corresponding update. The watcher goroutine uses the 'type' of the update to +// invoke the appropriate handler routine to handle the update. + +type ccStateUpdate struct { + ccs *balancer.ClientConnState +} + +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State + err error +} + +type exitIdleUpdate struct{} + +type resolverErrorUpdate struct { + err error +} + +type switchToUpdate struct { + name string +} + +type subConnUpdate struct { + acbw *acBalancerWrapper +} + +// watcher is a long-running goroutine which reads updates from a channel and +// invokes corresponding methods on the underlying balancer. It ensures that +// these methods are invoked in a synchronous fashion. It also ensures that +// these methods are invoked in the order in which the updates were received. func (ccb *ccBalancerWrapper) watcher() { for { select { - case t := <-ccb.updateCh.Get(): + case u := <-ccb.updateCh.Get(): ccb.updateCh.Load() if ccb.closed.HasFired() { break } - switch u := t.(type) { + switch update := u.(type) { + case *ccStateUpdate: + ccb.handleClientConnStateChange(update.ccs) case *scStateUpdate: - ccb.balancerMu.Lock() - ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err}) - ccb.balancerMu.Unlock() - case *acBalancerWrapper: - ccb.mu.Lock() - if ccb.subConns != nil { - delete(ccb.subConns, u) - ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain) - } - ccb.mu.Unlock() - case exitIdle: - if ccb.cc.GetState() == connectivity.Idle { - if ei, ok := ccb.balancer.(balancer.ExitIdler); ok { - // We already checked that the balancer implements - // ExitIdle before pushing the event to updateCh, but - // check conditionally again as defensive programming. - ccb.balancerMu.Lock() - ei.ExitIdle() - ccb.balancerMu.Unlock() - } - } + ccb.handleSubConnStateChange(update) + case *exitIdleUpdate: + ccb.handleExitIdle() + case *resolverErrorUpdate: + ccb.handleResolverError(update.err) + case *switchToUpdate: + ccb.handleSwitchTo(update.name) + case *subConnUpdate: + ccb.handleRemoveSubConn(update.acbw) default: - logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t) + logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update) } case <-ccb.closed.Done(): } if ccb.closed.HasFired() { - ccb.balancerMu.Lock() - ccb.balancer.Close() - ccb.balancerMu.Unlock() - ccb.mu.Lock() - scs := ccb.subConns - ccb.subConns = nil - ccb.mu.Unlock() - ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) - ccb.done.Fire() - // Fire done before removing the addr conns. We can safely unblock - // ccb.close and allow the removeAddrConns to happen - // asynchronously. 
- for acbw := range scs { - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) - } + ccb.handleClose() return } } } -func (ccb *ccBalancerWrapper) close() { - ccb.closed.Fire() - <-ccb.done.Done() +// updateClientConnState is invoked by grpc to push a ClientConnState update to +// the underlying balancer. +// +// Unlike other methods invoked by grpc to push updates to the underlying +// balancer, this method cannot simply push the update onto the update channel +// and return. It needs to return the error returned by the underlying balancer +// back to grpc which propagates that to the resolver. +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + ccb.updateCh.Put(&ccStateUpdate{ccs: ccs}) + + var res interface{} + select { + case res = <-ccb.resultCh.Get(): + ccb.resultCh.Load() + case <-ccb.closed.Done(): + // Return early if the balancer wrapper is closed while we are waiting for + // the underlying balancer to process a ClientConnState update. + return nil + } + // If the returned error is nil, attempting to type assert to error leads to + // panic. So, this needs to handled separately. + if res == nil { + return nil + } + return res.(error) } -func (ccb *ccBalancerWrapper) exitIdle() bool { - if !ccb.hasExitIdle { - return false +// handleClientConnStateChange handles a ClientConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. +// +// If the addresses specified in the update contain addresses of type "grpclb" +// and the selected LB policy is not "grpclb", these addresses will be filtered +// out and ccs will be modified with the updated address list. +func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) { + if ccb.curBalancerName != grpclbName { + // Filter any grpclb addresses since we don't have the grpclb balancer. + var addrs []resolver.Address + for _, addr := range ccs.ResolverState.Addresses { + if addr.Type == resolver.GRPCLB { + continue + } + addrs = append(addrs, addr) + } + ccs.ResolverState.Addresses = addrs } - ccb.updateCh.Put(exitIdle{}) - return true + ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs)) } -func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { +// updateSubConnState is invoked by grpc to push a subConn state update to the +// underlying balancer. +func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { // When updating addresses for a SubConn, if the address in use is not in // the new addresses, the old ac will be tearDown() and a new ac will be // created. tearDown() generates a state change with Shutdown state, we @@ -161,44 +209,125 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co }) } -func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() - return ccb.balancer.UpdateClientConnState(*ccs) +// handleSubConnStateChange handles a SubConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. 
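// updateClientConnState above is the one update that must return a value: it
// queues the work, then blocks until the watcher publishes the balancer's
// error on resultCh. The same request/reply shape, sketched with plain
// channels and illustrative names; note the real code receives interface{}
// from its channel and must special-case nil before asserting to error,
// which a typed chan error sidesteps here:

package main

import "fmt"

func main() {
	updateCh := make(chan string, 1)
	resultCh := make(chan error, 1)

	// Watcher side: handle the update, then report the outcome back.
	go func() {
		req := <-updateCh
		_ = req       // process the update here
		var err error // nil means the update was accepted
		resultCh <- err
	}()

	updateCh <- "client conn state"
	// Caller blocks here, so the error can propagate back to the resolver.
	if err := <-resultCh; err != nil {
		fmt.Println("update rejected:", err)
	} else {
		fmt.Println("update accepted")
	}
}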
+func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) { + ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err}) +} + +func (ccb *ccBalancerWrapper) exitIdle() { + ccb.updateCh.Put(&exitIdleUpdate{}) +} + +func (ccb *ccBalancerWrapper) handleExitIdle() { + if ccb.cc.GetState() != connectivity.Idle { + return + } + ccb.balancer.ExitIdle() } func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() + ccb.updateCh.Put(&resolverErrorUpdate{err: err}) +} + +func (ccb *ccBalancerWrapper) handleResolverError(err error) { ccb.balancer.ResolverError(err) } +// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the +// LB policy identified by name. +// +// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the +// first good update from the name resolver, it determines the LB policy to use +// and invokes the switchTo() method. Upon receipt of every subsequent update +// from the name resolver, it invokes this method. +// +// the ccBalancerWrapper keeps track of the current LB policy name, and skips +// the graceful balancer switching process if the name does not change. +func (ccb *ccBalancerWrapper) switchTo(name string) { + ccb.updateCh.Put(&switchToUpdate{name: name}) +} + +// handleSwitchTo handles a balancer switch update from the update channel. It +// calls the SwitchTo() method on the gracefulswitch.Balancer with a +// balancer.Builder corresponding to name. If no balancer.Builder is registered +// for the given name, it uses the default LB policy which is "pick_first". +func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { + // TODO: Other languages use case-insensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. + if strings.EqualFold(ccb.curBalancerName, name) { + return + } + + // TODO: Ensure that name is a registered LB policy when we get here. + // We currently only validate the `loadBalancingConfig` field. We need to do + // the same for the `loadBalancingPolicy` field and reject the service config + // if the specified policy is not registered. + builder := balancer.Get(name) + if builder == nil { + channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) + builder = newPickfirstBuilder() + } else { + channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) + } + + if err := ccb.balancer.SwitchTo(builder); err != nil { + channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) + return + } + ccb.curBalancerName = builder.Name() +} + +// handleRemoveSucConn handles a request from the underlying balancer to remove +// a subConn. +// +// See comments in RemoveSubConn() for more details. 
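// switchTo/handleSwitchTo above are ultimately driven by the LB policy name
// the resolver reports. From the application side, a policy switch can be
// exercised through the service config (sketch; the endpoint is
// hypothetical):

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("dns:///example.com:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// The wrapper gracefully switches from the default pick_first to
		// round_robin when this config is applied.
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}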
+func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) { + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) close() { + ccb.closed.Fire() + <-ccb.done.Done() +} + +func (ccb *ccBalancerWrapper) handleClose() { + ccb.balancer.Close() + ccb.done.Fire() +} + func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { if len(addrs) <= 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") - } ac, err := ccb.cc.newAddrConn(addrs, opts) if err != nil { + channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } acbw := &acBalancerWrapper{ac: ac} acbw.ac.mu.Lock() ac.acbw = acbw acbw.ac.mu.Unlock() - ccb.subConns[acbw] = struct{}{} return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - // The RemoveSubConn() is handled in the run() goroutine, to avoid deadlock - // during switchBalancer() if the old balancer calls RemoveSubConn() in its - // Close(). - ccb.updateCh.Put(sc) + // Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it + // was required to handle the RemoveSubConn() method asynchronously by pushing + // the update onto the update channel. This was done to avoid a deadlock as + // switchBalancer() was holding cc.mu when calling Close() on the old + // balancer, which would in turn call RemoveSubConn(). + // + // With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this + // asynchronously is probably not required anymore since the switchTo() method + // handles the balancer switch by pushing the update onto the channel. + // TODO(easwars): Handle this inline. + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + ccb.updateCh.Put(&subConnUpdate{acbw: acbw}) } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { @@ -210,11 +339,6 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return - } // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. If the picker is diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go new file mode 100644 index 0000000000..a220c47c59 --- /dev/null +++ b/vendor/google.golang.org/grpc/channelz/channelz.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package channelz exports internals of the channelz implementation as required +// by other gRPC packages. +// +// The implementation of the channelz spec as defined in +// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by +// the `internal/channelz` package. +// +// Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package channelz + +import "google.golang.org/grpc/internal/channelz" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier = channelz.Identifier diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 28f09dc870..0d21f2210b 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -79,7 +79,7 @@ var ( // errNoTransportSecurity indicates that there is no transport security // being set for ClientConn. Users should either set one or explicitly // call WithInsecure DialOption to disable security. - errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)") // errTransportCredsAndBundle indicates that creds bundle is used together // with other individual Transport Credentials. errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") @@ -146,6 +146,10 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) + for _, opt := range extraDialOptions { + opt.apply(&cc.dopts) + } + for _, opt := range opts { opt.apply(&cc.dopts) } @@ -159,23 +163,20 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if channelz.IsOn() { - if cc.dopts.channelzParentID != 0 { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), - Severity: channelz.CtInfo, - }, - }) - } else { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.Info(logger, cc.channelzID, "Channel Created") + pid := cc.dopts.channelzParentID + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target) + ted := &channelz.TraceEventDesc{ + Desc: "Channel created", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), + Severity: channelz.CtInfo, } - cc.csMgr.channelzID = cc.channelzID } + channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) + cc.csMgr.channelzID = cc.channelzID if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { return nil, errNoTransportSecurity @@ -281,7 +282,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if creds := cc.dopts.copts.TransportCredentials; creds != nil { credsClone = creds.Clone() } - cc.balancerBuildOpts = 
balancer.BuildOptions{ + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ DialCreds: credsClone, CredsBundle: cc.dopts.copts.CredsBundle, Dialer: cc.dopts.copts.Dialer, @@ -289,7 +290,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParentID: cc.channelzID, Target: cc.parsedTarget, - } + }) // Build the resolver. rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) @@ -398,7 +399,7 @@ type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} - channelzID int64 + channelzID *channelz.Identifier } // updateState updates the connectivity.State of ClientConn. @@ -464,34 +465,36 @@ var _ ClientConnInterface = (*ClientConn)(nil) // handshakes. It also handles errors on established connections by // re-resolving the name and reconnecting. type ClientConn struct { - ctx context.Context - cancel context.CancelFunc - - target string - parsedTarget resolver.Target - authority string - dopts dialOptions - csMgr *connectivityStateManager - - balancerBuildOpts balancer.BuildOptions - blockingpicker *pickerWrapper - + ctx context.Context // Initialized using the background context at dial time. + cancel context.CancelFunc // Cancelled on close. + + // The following are initialized at dial time, and are read-only after that. + target string // User's dial target. + parsedTarget resolver.Target // See parseTargetAndFindResolver(). + authority string // See determineAuthority(). + dopts dialOptions // Default and user specified dial options. + channelzID *channelz.Identifier // Channelz identifier for the channel. + balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. + + // The following provide their own synchronization, and therefore don't + // require cc.mu to be held to access them. + csMgr *connectivityStateManager + blockingpicker *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector + czData *channelzData + retryThrottler atomic.Value // Updated from service config. - mu sync.RWMutex - resolverWrapper *ccResolverWrapper - sc *ServiceConfig - conns map[*addrConn]struct{} - // Keepalive parameter can be updated if a GoAway is received. - mkp keepalive.ClientParameters - curBalancerName string - balancerWrapper *ccBalancerWrapper - retryThrottler atomic.Value - + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. firstResolveEvent *grpcsync.Event - channelzID int64 // channelz unique identification number - czData *channelzData + // mu protects the following fields. + // TODO: split mu so the same mutex isn't used for everything. + mu sync.RWMutex + resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + sc *ServiceConfig // Latest service config received from the resolver. + conns map[*addrConn]struct{} // Set to nil on close. + mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. lceMu sync.Mutex // protects lastConnectionError lastConnectionError error @@ -536,14 +539,7 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. 
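// Connect() below now forwards straight to the balancer wrapper's exitIdle()
// instead of poking each addrConn directly. A caller-side sketch (address
// hypothetical):

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
	// Ask an IDLE channel to start connecting without issuing an RPC.
	conn.Connect()
}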
func (cc *ClientConn) Connect() { - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.balancerWrapper != nil && cc.balancerWrapper.exitIdle() { - return - } - for ac := range cc.conns { - go ac.connect() - } + cc.balancerWrapper.exitIdle() } func (cc *ClientConn) scWatcher() { @@ -623,9 +619,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { // with the new addresses. cc.maybeApplyDefaultServiceConfig(nil) - if cc.balancerWrapper != nil { - cc.balancerWrapper.resolverError(err) - } + cc.balancerWrapper.resolverError(err) // No addresses are valid with err set; return early. cc.mu.Unlock() @@ -653,16 +647,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) } else { ret = balancer.ErrBadResolverState - if cc.balancerWrapper == nil { - var err error - if s.ServiceConfig.Err != nil { - err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err) - } else { - err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) - } - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc}) - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) - cc.csMgr.updateState(connectivity.TransientFailure) + if cc.sc == nil { + // Apply the failing LB only if we haven't received valid service config + // from the name resolver in the past. + cc.applyFailingLB(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -670,24 +658,12 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } var balCfg serviceconfig.LoadBalancingConfig - if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc != nil && cc.sc.lbConfig != nil { balCfg = cc.sc.lbConfig.cfg } - - cbn := cc.curBalancerName bw := cc.balancerWrapper cc.mu.Unlock() - if cbn != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. - for i := 0; i < len(s.Addresses); { - if s.Addresses[i].Type == resolver.GRPCLB { - copy(s.Addresses[i:], s.Addresses[i+1:]) - s.Addresses = s.Addresses[:len(s.Addresses)-1] - continue - } - i++ - } - } + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) if ret == nil { ret = uccsErr // prefer ErrBadResolver state since any other error is @@ -696,56 +672,28 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } -// switchBalancer starts the switching from current balancer to the balancer -// with the given name. -// -// It will NOT send the current address list to the new balancer. If needed, -// caller of this function should send address list to the new balancer after -// this function returns. +// applyFailingLB is akin to configuring an LB policy on the channel which +// always fails RPCs. Here, an actual LB policy is not configured, but an always +// erroring picker is configured, which returns errors with information about +// what was invalid in the received service config. A config selector with no +// service config is configured, and the connectivity state of the channel is +// set to TransientFailure. // // Caller must hold cc.mu. 
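// With the failing LB installed (invalid service config and no previously
// good one), RPCs fail fast with a status carrying the parse error. A sketch
// of how a caller would observe that; the error value here is fabricated for
// illustration:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func describeRPCError(err error) string {
	s, ok := status.FromError(err)
	if !ok {
		return "not a gRPC status: " + err.Error()
	}
	if s.Code() == codes.Unavailable {
		// applyFailingLB surfaces service-config problems with this code.
		return "unavailable: " + s.Message()
	}
	return s.Code().String()
}

func main() {
	err := status.Error(codes.Unavailable, "error parsing service config: invalid JSON")
	fmt.Println(describeRPCError(err))
}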
-func (cc *ClientConn) switchBalancer(name string) { - if strings.EqualFold(cc.curBalancerName, name) { - return - } - - channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) - if cc.dopts.balancerBuilder != nil { - channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") - return - } - if cc.balancerWrapper != nil { - // Don't hold cc.mu while closing the balancers. The balancers may call - // methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex - // would cause a deadlock in that case. - cc.mu.Unlock() - cc.balancerWrapper.close() - cc.mu.Lock() - } - - builder := balancer.Get(name) - if builder == nil { - channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) - channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) - builder = newPickfirstBuilder() +func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { + var err error + if sc.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) } else { - channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name) + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } - - cc.curBalancerName = builder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) } func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return - } - // TODO(bar switching) send updates to all balancer wrappers when balancer - // gracefully switching is supported. - cc.balancerWrapper.handleSubConnStateChange(sc, s, err) - cc.mu.Unlock() + cc.balancerWrapper.updateSubConnState(sc, s, err) } // newAddrConn creates an addrConn for addrs and adds it to cc.conns. @@ -768,17 +716,21 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub cc.mu.Unlock() return nil, ErrClientConnClosing } - if channelz.IsOn() { - ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) + + var err error + ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") + if err != nil { + return nil, err } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), + Severity: channelz.CtInfo, + }, + }) + cc.conns[ac] = struct{}{} cc.mu.Unlock() return ac, nil @@ -853,16 +805,31 @@ func (ac *addrConn) connect() error { return nil } +func equalAddresses(a, b []resolver.Address) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if !v.Equal(b[i]) { + return false + } + } + return true +} + // tryUpdateAddrs tries to update ac.addrs with the new addresses list. // -// If ac is Connecting, it returns false. 
The caller should tear down the ac and -// create a new one. Note that the backoff will be reset when this happens. -// // If ac is TransientFailure, it updates ac.addrs and returns true. The updated // addresses will be picked up by retry in the next iteration after backoff. // // If ac is Shutdown or Idle, it updates ac.addrs and returns true. // +// If the addresses is the same as the old list, it does nothing and returns +// true. +// +// If ac is Connecting, it returns false. The caller should tear down the ac and +// create a new one. Note that the backoff will be reset when this happens. +// // If ac is Ready, it checks whether current connected address of ac is in the // new addrs list. // - If true, it updates ac.addrs and returns true. The ac will keep using @@ -879,6 +846,10 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { return true } + if equalAddresses(ac.addrs, addrs) { + return true + } + if ac.state == connectivity.Connecting { return false } @@ -959,14 +930,10 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { - t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) - if err != nil { - return nil, nil, toRPCErr(err) - } - return t, done, nil } func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { @@ -991,35 +958,26 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel cc.retryThrottler.Store((*retryThrottler)(nil)) } - if cc.dopts.balancerBuilder == nil { - // Only look at balancer types and switch balancer if balancer dial - // option is not set. - var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { - newBalancerName = cc.sc.lbConfig.name - } else { - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName + var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break } } - cc.switchBalancer(newBalancerName) - } else if cc.balancerWrapper == nil { - // Balancer dial option was set, and this is the first time handling - // resolved addresses. Build a balancer with dopts.balancerBuilder. - cc.curBalancerName = cc.dopts.balancerBuilder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } } + cc.balancerWrapper.switchTo(newBalancerName) } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { @@ -1070,11 +1028,11 @@ func (cc *ClientConn) Close() error { rWrapper := cc.resolverWrapper cc.resolverWrapper = nil bWrapper := cc.balancerWrapper - cc.balancerWrapper = nil cc.mu.Unlock() + // The order of closing matters here since the balancer wrapper assumes the + // picker is closed before it is closed. 
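// The equalAddresses helper above lets tryUpdateAddrs return early when the
// resolver re-sends an identical address list. A standalone sketch of the
// same order-sensitive, element-wise comparison built on resolver.Address.Equal:

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func equalAddresses(a, b []resolver.Address) bool {
	if len(a) != len(b) {
		return false
	}
	for i, v := range a {
		if !v.Equal(b[i]) {
			return false
		}
	}
	return true
}

func main() {
	a := []resolver.Address{{Addr: "10.0.0.1:80"}, {Addr: "10.0.0.2:80"}}
	b := []resolver.Address{{Addr: "10.0.0.2:80"}, {Addr: "10.0.0.1:80"}}
	// Same elements in a different order are not considered equal.
	fmt.Println(equalAddresses(a, a), equalAddresses(a, b)) // true false
}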
cc.blockingpicker.close() - if bWrapper != nil { bWrapper.close() } @@ -1085,22 +1043,22 @@ func (cc *ClientConn) Close() error { for ac := range conns { ac.tearDown(ErrClientConnClosing) } - if channelz.IsOn() { - ted := &channelz.TraceEventDesc{ - Desc: "Channel Deleted", + ted := &channelz.TraceEventDesc{ + Desc: "Channel deleted", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()), Severity: channelz.CtInfo, } - if cc.dopts.channelzParentID != 0 { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(cc.channelzID) } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from being + // deleted right away. + channelz.RemoveEntry(cc.channelzID) + return nil } @@ -1130,7 +1088,7 @@ type addrConn struct { backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} - channelzID int64 // channelz unique identification number. + channelzID *channelz.Identifier czData *channelzData } @@ -1284,6 +1242,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne ac.mu.Lock() defer ac.mu.Unlock() defer connClosed.Fire() + defer hcancel() if !hcStarted || hctx.Err() != nil { // We didn't start the health check or set the state to READY, so // no need to do anything else here. @@ -1294,7 +1253,6 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // state, since there may be a new transport in this addrConn. return } - hcancel() ac.transport = nil // Refresh the name resolver ac.cc.resolveNow(resolver.ResolveNowOptions{}) @@ -1312,14 +1270,13 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) defer cancel() - if channelz.IsOn() { - copts.ChannelzParentID = ac.channelzID - } + copts.ChannelzParentID = ac.channelzID newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v", addr, err) + hcancel() + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. 
Err: %v", addr, err) return err } @@ -1332,7 +1289,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne newTr.Close(transport.ErrConnClosing) if connectCtx.Err() == context.DeadlineExceeded { err := errors.New("failed to receive server preface within timeout") - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: %v", addr, err) + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s: %v", addr, err) return err } return nil @@ -1497,19 +1454,18 @@ func (ac *addrConn) tearDown(err error) { curTr.GracefulClose() ac.mu.Lock() } - if channelz.IsOn() { - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Deleted", + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel deleted", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(ac.channelzID) - } + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from + // being deleted right away. + channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go index 77d759cd95..2de2c4affd 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go @@ -24,6 +24,7 @@ import ( "sync" grpc "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ) var ( @@ -49,7 +50,7 @@ func Dial(hsAddress string) (*grpc.ClientConn, error) { // Create a new connection to the handshaker service. Note that // this connection stays open until the application is closed. var err error - hsConn, err = hsDialer(hsAddress, grpc.WithInsecure()) + hsConn, err = hsDialer(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index a02c458281..fd55176b9b 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: grpc/gcp/handshaker.proto diff --git a/vendor/google.golang.org/grpc/credentials/google/google.go b/vendor/google.golang.org/grpc/credentials/google/google.go index 63625a4b68..fbdf7dc299 100644 --- a/vendor/google.golang.org/grpc/credentials/google/google.go +++ b/vendor/google.golang.org/grpc/credentials/google/google.go @@ -50,7 +50,7 @@ func NewDefaultCredentialsWithOptions(opts DefaultCredentialsOptions) credential ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout) defer cancel() var err error - opts.PerRPCCreds, err = oauth.NewApplicationDefault(ctx) + opts.PerRPCCreds, err = newADC(ctx) if err != nil { logger.Warningf("NewDefaultCredentialsWithOptions: failed to create application oauth: %v", err) } @@ -112,6 +112,9 @@ var ( newALTS = func() credentials.TransportCredentials { return alts.NewClientCreds(alts.DefaultClientOptions()) } + newADC = func(ctx context.Context) (credentials.PerRPCCredentials, error) { + return oauth.NewApplicationDefault(ctx) + } ) // NewWithMode should make a copy of Bundle, and switch mode. Modifying the diff --git a/vendor/google.golang.org/grpc/credentials/google/xds.go b/vendor/google.golang.org/grpc/credentials/google/xds.go index b8c2e8f920..e32edc0421 100644 --- a/vendor/google.golang.org/grpc/credentials/google/xds.go +++ b/vendor/google.golang.org/grpc/credentials/google/xds.go @@ -21,6 +21,7 @@ package google import ( "context" "net" + "net/url" "strings" "google.golang.org/grpc/credentials" @@ -28,12 +29,16 @@ import ( ) const cfeClusterNamePrefix = "google_cfe_" +const cfeClusterResourceNamePrefix = "/envoy.config.cluster.v3.Cluster/google_cfe_" +const cfeClusterAuthorityName = "traffic-director-c2p.xds.googleapis.com" // clusterTransportCreds is a combo of TLS + ALTS. // // On the client, ClientHandshake picks TLS or ALTS based on address attributes. // - if attributes has cluster name -// - if cluster name has prefix "google_cfe_", use TLS +// - if cluster name has prefix "google_cfe_", or +// "xdstp://traffic-director-c2p.xds.googleapis.com/envoy.config.cluster.v3.Cluster/google_cfe_", +// use TLS // - otherwise, use ALTS // - else, do TLS // @@ -50,18 +55,49 @@ func newClusterTransportCreds(tls, alts credentials.TransportCredentials) *clust } } -func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { +// clusterName returns the xDS cluster name stored in the attributes in the +// context. +func clusterName(ctx context.Context) string { chi := credentials.ClientHandshakeInfoFromContext(ctx) if chi.Attributes == nil { - return c.tls.ClientHandshake(ctx, authority, rawConn) + return "" + } + cluster, _ := internal.GetXDSHandshakeClusterName(chi.Attributes) + return cluster +} + +// isDirectPathCluster returns true if the cluster in the context is a +// directpath cluster, meaning ALTS should be used. +func isDirectPathCluster(ctx context.Context) bool { + cluster := clusterName(ctx) + if cluster == "" { + // No cluster; not xDS; use TLS. + return false + } + if strings.HasPrefix(cluster, cfeClusterNamePrefix) { + // xDS cluster prefixed by "google_cfe_"; use TLS. 
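// For reference, how an "xdstp:" cluster resource name splits under
// net/url.Parse; the authority and path are then compared against the CFE
// constants above (sketch only; the trailing cluster name is hypothetical):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	const name = "xdstp://traffic-director-c2p.xds.googleapis.com" +
		"/envoy.config.cluster.v3.Cluster/google_cfe_example"
	u, err := url.Parse(name)
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Host) // traffic-director-c2p.xds.googleapis.com
	fmt.Println(u.Path) // /envoy.config.cluster.v3.Cluster/google_cfe_example
}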
+ return false } - cn, ok := internal.GetXDSHandshakeClusterName(chi.Attributes) - if !ok || strings.HasPrefix(cn, cfeClusterNamePrefix) { - return c.tls.ClientHandshake(ctx, authority, rawConn) + if !strings.HasPrefix(cluster, "xdstp:") { + // Other xDS cluster name; use ALTS. + return true + } + u, err := url.Parse(cluster) + if err != nil { + // Shouldn't happen, but assume ALTS. + return true + } + // If authority AND path match our CFE checks, use TLS; otherwise use ALTS. + return u.Host != cfeClusterAuthorityName || !strings.HasPrefix(u.Path, cfeClusterResourceNamePrefix) +} + +func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if isDirectPathCluster(ctx) { + // If attributes have cluster name, and cluster name is not cfe, it's a + // backend address, use ALTS. + return c.alts.ClientHandshake(ctx, authority, rawConn) } - // If attributes have cluster name, and cluster name is not cfe, it's a - // backend address, use ALTS. - return c.alts.ClientHandshake(ctx, authority, rawConn) + return c.tls.ClientHandshake(ctx, authority, rawConn) } func (c *clusterTransportCreds) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go index 22a8f996a6..82bee1443b 100644 --- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -18,11 +18,6 @@ // Package insecure provides an implementation of the // credentials.TransportCredentials interface which disables transport security. -// -// Experimental -// -// Notice: This package is EXPERIMENTAL and may be changed or removed in a -// later release. package insecure import ( @@ -75,3 +70,29 @@ type info struct { func (info) AuthType() string { return "insecure" } + +// insecureBundle implements an insecure bundle. +// An insecure bundle provides a thin wrapper around insecureTC to support +// the credentials.Bundle interface. +type insecureBundle struct{} + +// NewBundle returns a bundle with disabled transport security and no per rpc credential. +func NewBundle() credentials.Bundle { + return insecureBundle{} +} + +// NewWithMode returns a new insecure Bundle. The mode is ignored. +func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) { + return insecureBundle{}, nil +} + +// PerRPCCredentials returns an nil implementation as insecure +// bundle does not support a per rpc credential. +func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials { + return nil +} + +// TransportCredentials returns the underlying insecure transport credential. 
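// The new insecure bundle gives callers a credentials.Bundle with transport
// security disabled, usable anywhere a bundle is expected (sketch; the
// address is hypothetical):

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithCredentialsBundle(insecure.NewBundle()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}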
+func (insecureBundle) TransportCredentials() credentials.TransportCredentials { + return NewCredentials() +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 063f1e903c..75d01ba777 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -20,12 +20,11 @@ package grpc import ( "context" - "fmt" "net" "time" "google.golang.org/grpc/backoff" - "google.golang.org/grpc/balancer" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" @@ -36,6 +35,15 @@ import ( "google.golang.org/grpc/stats" ) +func init() { + internal.AddExtraDialOptions = func(opt ...DialOption) { + extraDialOptions = append(extraDialOptions, opt...) + } + internal.ClearExtraDialOptions = func() { + extraDialOptions = nil + } +} + // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. type dialOptions struct { @@ -45,19 +53,17 @@ type dialOptions struct { chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor - cp Compressor - dc Decompressor - bs internalbackoff.Strategy - block bool - returnLastError bool - timeout time.Duration - scChan <-chan ServiceConfig - authority string - copts transport.ConnectOptions - callOptions []CallOption - // This is used by WithBalancerName dial option. - balancerBuilder balancer.Builder - channelzParentID int64 + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + copts transport.ConnectOptions + callOptions []CallOption + channelzParentID *channelz.Identifier disableServiceConfig bool disableRetry bool disableHealthCheck bool @@ -73,6 +79,8 @@ type DialOption interface { apply(*dialOptions) } +var extraDialOptions []DialOption + // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. // @@ -195,25 +203,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithBalancerName sets the balancer that the ClientConn will be initialized -// with. Balancer registered with balancerName will be used. This function -// panics if no balancer was registered by balancerName. -// -// The balancer cannot be overridden by balancer option specified by service -// config. -// -// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig -// instead. Will be removed in a future 1.x release. -func WithBalancerName(balancerName string) DialOption { - builder := balancer.Get(balancerName) - if builder == nil { - panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) - } - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = builder - }) -} - // WithServiceConfig returns a DialOption which has a channel to read the // service configuration. // @@ -272,7 +261,7 @@ func withBackoff(bs internalbackoff.Strategy) DialOption { }) } -// WithBlock returns a DialOption which makes caller of Dial blocks until the +// WithBlock returns a DialOption which makes callers of Dial block until the // underlying connection is up. Without this, Dial returns immediately and // connecting the server happens in background. 
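// WithBlock, documented below, makes Dial wait for the first connection;
// pairing it with a context deadline keeps an unreachable server from
// blocking forever (sketch; the address is hypothetical):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	conn, err := grpc.DialContext(ctx, "localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock(), // block until connected or the context expires
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}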
func WithBlock() DialOption { @@ -304,8 +293,8 @@ func WithReturnConnectionError() DialOption { // WithCredentialsBundle or WithPerRPCCredentials) which require transport // security is incompatible and will cause grpc.Dial() to fail. // -// Deprecated: use insecure.NewCredentials() instead. -// Will be supported throughout 1.x. +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() +// instead. Will be supported throughout 1.x. func WithInsecure() DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.TransportCredentials = insecure.NewCredentials() @@ -402,7 +391,7 @@ func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { // all the RPCs and underlying network connections in this ClientConn. func WithStatsHandler(h stats.Handler) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.StatsHandler = h + o.copts.StatsHandlers = append(o.copts.StatsHandlers, h) }) } @@ -498,7 +487,7 @@ func WithAuthority(a string) DialOption { // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. -func WithChannelzParentID(id int64) DialOption { +func WithChannelzParentID(id *channelz.Identifier) DialOption { return newFuncDialOption(func(o *dialOptions) { o.channelzParentID = id }) diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 6d84f74c7d..18e530fc90 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -108,7 +108,7 @@ var registeredCodecs = make(map[string]Codec) // more details. // // NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple Compressors are +// an init() function), and is not thread-safe. If multiple Codecs are // registered with the same name, the one registered last will take effect. 
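// RegisterCodec must run during initialization, as the comment above notes.
// A toy pass-through codec showing the registration shape (illustrative
// only; a real codec must round-trip its message types correctly):

package main

import (
	"fmt"

	"google.golang.org/grpc/encoding"
)

type rawCodec struct{}

func (rawCodec) Marshal(v interface{}) ([]byte, error) {
	b, ok := v.([]byte)
	if !ok {
		return nil, fmt.Errorf("rawCodec: expected []byte, got %T", v)
	}
	return b, nil
}

func (rawCodec) Unmarshal(data []byte, v interface{}) error {
	p, ok := v.(*[]byte)
	if !ok {
		return fmt.Errorf("rawCodec: expected *[]byte, got %T", v)
	}
	*p = data
	return nil
}

func (rawCodec) Name() string { return "raw" }

func init() {
	// Last registration for a given name wins, per the doc comment above.
	encoding.RegisterCodec(rawCodec{})
}

func main() {
	fmt.Println(encoding.GetCodec("raw").Name())
}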
func RegisterCodec(codec Codec) { if codec == nil { diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod index fcffdceef2..6a760ed743 100644 --- a/vendor/google.golang.org/grpc/go.mod +++ b/vendor/google.golang.org/grpc/go.mod @@ -6,14 +6,14 @@ require ( github.com/cespare/xxhash/v2 v2.1.1 github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 - github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021 + github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b - github.com/golang/protobuf v1.4.3 - github.com/google/go-cmp v0.5.0 + github.com/golang/protobuf v1.5.2 + github.com/google/go-cmp v0.5.6 github.com/google/uuid v1.1.2 - golang.org/x/net v0.0.0-20200822124328-c89045814202 + golang.org/x/net v0.0.0-20201021035429-f5854403a974 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d - golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd + golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 - google.golang.org/protobuf v1.25.0 + google.golang.org/protobuf v1.27.1 ) diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum index 8b542e0beb..5f418dba1b 100644 --- a/vendor/google.golang.org/grpc/go.sum +++ b/vendor/google.golang.org/grpc/go.sum @@ -12,8 +12,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= @@ -22,8 +22,8 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021 h1:fP+fF0up6oPY49OrjPrhIJ8yQfdIM85NXMLkMg1EXVs= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0 
h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -40,14 +40,18 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= @@ -72,8 +76,9 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -84,10 +89,14 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -117,8 +126,11 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 34098bb8eb..7c1f664090 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -248,12 +248,12 @@ func (g *loggerT) V(l int) bool { // later release. type DepthLoggerV2 interface { LoggerV2 - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...interface{}) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. 
WarningDepth(depth int, args ...interface{}) - // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. ErrorDepth(depth int, args ...interface{}) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...interface{}) } diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index 668e0adcf0..bb96ef57be 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -72,9 +72,12 @@ type UnaryServerInfo struct { } // UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal -// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the -// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as -// the status message of the RPC. +// execution of a unary RPC. +// +// If a UnaryHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go new file mode 100644 index 0000000000..08666f62a7 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -0,0 +1,384 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gracefulswitch implements a graceful switch load balancer. +package gracefulswitch + +import ( + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") +var _ balancer.Balancer = (*Balancer)(nil) + +// NewBalancer returns a graceful switch Balancer. +func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { + return &Balancer{ + cc: cc, + bOpts: opts, + } +} + +// Balancer is a utility to gracefully switch from one balancer to +// a new balancer. It implements the balancer.Balancer interface. +type Balancer struct { + bOpts balancer.BuildOptions + cc balancer.ClientConn + + // mu protects the following fields and all fields within balancerCurrent + // and balancerPending. 
mu does not need to be held when calling into the + // child balancers, as all calls into these children happen only as a direct + // result of a call into the gracefulSwitchBalancer, which are also + // guaranteed to be synchronous. There is one exception: an UpdateState call + // from a child balancer when current and pending are populated can lead to + // calling Close() on the current. To prevent that racing with an + // UpdateSubConnState from the channel, we hold currentMu during Close and + // UpdateSubConnState calls. + mu sync.Mutex + balancerCurrent *balancerWrapper + balancerPending *balancerWrapper + closed bool // set to true when this balancer is closed + + // currentMu must be locked before mu. This mutex guards against this + // sequence of events: UpdateSubConnState() called, finds the + // balancerCurrent, gives up lock, updateState comes in, causes Close() on + // balancerCurrent before the UpdateSubConnState is called on the + // balancerCurrent. + currentMu sync.Mutex +} + +// swap swaps out the current lb with the pending lb and updates the ClientConn. +// The caller must hold gsb.mu. +func (gsb *Balancer) swap() { + gsb.cc.UpdateState(gsb.balancerPending.lastState) + cur := gsb.balancerCurrent + gsb.balancerCurrent = gsb.balancerPending + gsb.balancerPending = nil + go func() { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + cur.Close() + }() +} + +// Helper function that checks if the balancer passed in is current or pending. +// The caller must hold gsb.mu. +func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { + return bw == gsb.balancerCurrent || bw == gsb.balancerPending +} + +// SwitchTo initializes the graceful switch process, which completes based on +// connectivity state changes on the current/pending balancer. Thus, the switch +// process is not complete when this method returns. This method must be called +// synchronously alongside the rest of the balancer.Balancer methods this +// Graceful Switch Balancer implements. +func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + gsb.mu.Lock() + if gsb.closed { + gsb.mu.Unlock() + return errBalancerClosed + } + bw := &balancerWrapper{ + gsb: gsb, + lastState: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + subconns: make(map[balancer.SubConn]bool), + } + balToClose := gsb.balancerPending // nil if there is no pending balancer + if gsb.balancerCurrent == nil { + gsb.balancerCurrent = bw + } else { + gsb.balancerPending = bw + } + gsb.mu.Unlock() + balToClose.Close() + // This function takes a builder instead of a balancer because builder.Build + // can call back inline, and this utility needs to handle the callbacks. + newBalancer := builder.Build(bw, gsb.bOpts) + if newBalancer == nil { + // This is illegal and should never happen; we clear the balancerWrapper + // we were constructing if it happens to avoid a potential panic. + gsb.mu.Lock() + if gsb.balancerPending != nil { + gsb.balancerPending = nil + } else { + gsb.balancerCurrent = nil + } + gsb.mu.Unlock() + return balancer.ErrBadResolverState + } + + // This write doesn't need to take gsb.mu because this field never gets read + // or written to on any calls from the current or pending. Calls from grpc + // to this balancer are guaranteed to be called synchronously, so this + // bw.Balancer field will never be forwarded to until this SwitchTo() + // function returns. 
+ bw.Balancer = newBalancer + return nil +} + +// Returns nil if the graceful switch balancer is closed. +func (gsb *Balancer) latestBalancer() *balancerWrapper { + gsb.mu.Lock() + defer gsb.mu.Unlock() + if gsb.balancerPending != nil { + return gsb.balancerPending + } + return gsb.balancerCurrent +} + +// UpdateClientConnState forwards the update to the latest balancer created. +func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return errBalancerClosed + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + return balToUpdate.UpdateClientConnState(state) +} + +// ResolverError forwards the error to the latest balancer created. +func (gsb *Balancer) ResolverError(err error) { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + balToUpdate.ResolverError(err) +} + +// ExitIdle forwards the call to the latest balancer created. +// +// If the latest balancer does not support ExitIdle, the subConns are +// re-connected to manually. +func (gsb *Balancer) ExitIdle() { + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { + ei.ExitIdle() + return + } + gsb.mu.Lock() + defer gsb.mu.Unlock() + for sc := range balToUpdate.subconns { + sc.Connect() + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + gsb.mu.Lock() + // Forward update to the appropriate child. Even if there is a pending + // balancer, the current balancer should continue to get SubConn updates to + // maintain the proper state while the pending is still connecting. + var balToUpdate *balancerWrapper + if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] { + balToUpdate = gsb.balancerCurrent + } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { + balToUpdate = gsb.balancerPending + } + gsb.mu.Unlock() + if balToUpdate == nil { + // SubConn belonged to a stale lb policy that has not yet fully closed, + // or the balancer was already closed. + return + } + balToUpdate.UpdateSubConnState(sc, state) +} + +// Close closes any active child balancers. +func (gsb *Balancer) Close() { + gsb.mu.Lock() + gsb.closed = true + currentBalancerToClose := gsb.balancerCurrent + gsb.balancerCurrent = nil + pendingBalancerToClose := gsb.balancerPending + gsb.balancerPending = nil + gsb.mu.Unlock() + + currentBalancerToClose.Close() + pendingBalancerToClose.Close() +} + +// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer +// methods to help cleanup SubConns created by the wrapped balancer. 
+// +// It implements the balancer.ClientConn interface and is passed down in that +// capacity to the wrapped balancer. It maintains a set of subConns created by +// the wrapped balancer and calls from the latter to create/update/remove +// SubConns update this set before being forwarded to the parent ClientConn. +// State updates from the wrapped balancer can result in invocation of the +// graceful switch logic. +type balancerWrapper struct { + balancer.Balancer + gsb *Balancer + + lastState balancer.State + subconns map[balancer.SubConn]bool // subconns created by this balancer +} + +func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + if state.ConnectivityState == connectivity.Shutdown { + bw.gsb.mu.Lock() + delete(bw.subconns, sc) + bw.gsb.mu.Unlock() + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + bw.Balancer.UpdateSubConnState(sc, state) +} + +// Close closes the underlying LB policy and removes the subconns it created. bw +// must not be referenced via balancerCurrent or balancerPending in gsb when +// called. gsb.mu must not be held. Does not panic with a nil receiver. +func (bw *balancerWrapper) Close() { + // before Close is called. + if bw == nil { + return + } + // There is no need to protect this read with a mutex, as Close() is + // impossible to be called concurrently with the write in SwitchTo(). The + // callsites of Close() for this balancer in Graceful Switch Balancer will + // never be called until SwitchTo() returns. + bw.Balancer.Close() + bw.gsb.mu.Lock() + for sc := range bw.subconns { + bw.gsb.cc.RemoveSubConn(sc) + } + bw.gsb.mu.Unlock() +} + +func (bw *balancerWrapper) UpdateState(state balancer.State) { + // Hold the mutex for this entire call to ensure it cannot occur + // concurrently with other updateState() calls. This causes updates to + // lastState and calls to cc.UpdateState to happen atomically. + bw.gsb.mu.Lock() + defer bw.gsb.mu.Unlock() + bw.lastState = state + + if !bw.gsb.balancerCurrentOrPending(bw) { + return + } + + if bw == bw.gsb.balancerCurrent { + // In the case that the current balancer exits READY, and there is a pending + // balancer, you can forward the pending balancer's cached State up to + // ClientConn and swap the pending into the current. This is because there + // is no reason to gracefully switch from and keep using the old policy as + // the ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil { + bw.gsb.swap() + return + } + // Even if there is a pending balancer waiting to be gracefully switched to, + // continue to forward current balancer updates to the Client Conn. Ignoring + // state + picker from the current would cause undefined behavior/cause the + // system to behave incorrectly from the current LB policies perspective. + // Also, the current LB is still being used by grpc to choose SubConns per + // RPC, and thus should use the most updated form of the current balancer. + bw.gsb.cc.UpdateState(state) + return + } + // This method is now dealing with a state update from the pending balancer. + // If the current balancer is currently in a state other than READY, the new + // policy can be swapped into place immediately. This is because there is no + // reason to gracefully switch from and keep using the old policy as the + // ClientConn is not connected to any backends. 
+ if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready { + bw.gsb.swap() + } +} + +func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.gsb.mu.Unlock() + + sc, err := bw.gsb.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call + bw.gsb.cc.RemoveSubConn(sc) + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.subconns[sc] = true + bw.gsb.mu.Unlock() + return sc, nil +} + +func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { + // Ignore ResolveNow requests from anything other than the most recent + // balancer, because older balancers were already removed from the config. + if bw != bw.gsb.latestBalancer() { + return + } + bw.gsb.cc.ResolveNow(opts) +} + +func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.RemoveSubConn(sc) +} + +func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.UpdateAddresses(sc, addrs) +} + +func (bw *balancerWrapper) Target() string { + return bw.gsb.cc.Target() +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index 5cc3aeddb2..e3dfe204f9 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -31,7 +31,7 @@ import ( // Logger is the global binary logger. It can be used to get binary logger for // each method. type Logger interface { - getMethodLogger(methodName string) *MethodLogger + GetMethodLogger(methodName string) MethodLogger } // binLogger is the global binary logger for the binary. One of this should be @@ -42,24 +42,31 @@ var binLogger Logger var grpclogLogger = grpclog.Component("binarylog") -// SetLogger sets the binarg logger. +// SetLogger sets the binary logger. // // Only call this at init time. func SetLogger(l Logger) { binLogger = l } +// GetLogger gets the binary logger. +// +// Only call this at init time. +func GetLogger() Logger { + return binLogger +} + // GetMethodLogger returns the methodLogger for the given methodName. // // methodName should be in the format of "/service/method". // // Each methodLogger returned by this method is a new instance. This is to // generate sequence id within the call. -func GetMethodLogger(methodName string) *MethodLogger { +func GetMethodLogger(methodName string) MethodLogger { if binLogger == nil { return nil } - return binLogger.getMethodLogger(methodName) + return binLogger.GetMethodLogger(methodName) } func init() { @@ -68,17 +75,29 @@ func init() { binLogger = NewLoggerFromConfigString(configStr) } -type methodLoggerConfig struct { +// MethodLoggerConfig contains the setting for logging behavior of a method +// logger. Currently, it contains the max length of header and message. 
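Reviewer note on the gracefulswitch package vendored above: it lives under grpc/internal, so only gRPC's own LB policies can import it, but the intended wiring from a parent policy looks roughly like the sketch below, which assumes only the APIs visible in this diff (NewBalancer, SwitchTo, UpdateClientConnState):

// parentBalancer delegates to a gracefulswitch.Balancer created with
// gracefulswitch.NewBalancer(cc, opts) at build time.
type parentBalancer struct {
	gsb *gracefulswitch.Balancer
}

func (p *parentBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
	// When the config selects a new child policy, SwitchTo starts it as
	// pending; traffic cuts over once the pending child leaves CONNECTING
	// or the current child leaves READY.
	if b := balancer.Get("round_robin"); b != nil {
		if err := p.gsb.SwitchTo(b); err != nil {
			return err
		}
	}
	// Resolver state always flows to the latest child (pending if set).
	return p.gsb.UpdateClientConnState(s)
}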
+type MethodLoggerConfig struct { // Max length of header and message. - hdr, msg uint64 + Header, Message uint64 +} + +// LoggerConfig contains the config for loggers to create method loggers. +type LoggerConfig struct { + All *MethodLoggerConfig + Services map[string]*MethodLoggerConfig + Methods map[string]*MethodLoggerConfig + + Blacklist map[string]struct{} } type logger struct { - all *methodLoggerConfig - services map[string]*methodLoggerConfig - methods map[string]*methodLoggerConfig + config LoggerConfig +} - blacklist map[string]struct{} +// NewLoggerFromConfig builds a logger with the given LoggerConfig. +func NewLoggerFromConfig(config LoggerConfig) Logger { + return &logger{config: config} } // newEmptyLogger creates an empty logger. The map fields need to be filled in @@ -88,57 +107,57 @@ func newEmptyLogger() *logger { } // Set method logger for "*". -func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { - if l.all != nil { +func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { + if l.config.All != nil { return fmt.Errorf("conflicting global rules found") } - l.all = ml + l.config.All = ml return nil } // Set method logger for "service/*". // // New methodLogger with same service overrides the old one. -func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { - if _, ok := l.services[service]; ok { +func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Services[service]; ok { return fmt.Errorf("conflicting service rules for service %v found", service) } - if l.services == nil { - l.services = make(map[string]*methodLoggerConfig) + if l.config.Services == nil { + l.config.Services = make(map[string]*MethodLoggerConfig) } - l.services[service] = ml + l.config.Services[service] = ml return nil } // Set method logger for "service/method". // // New methodLogger with same method overrides the old one. -func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { - if _, ok := l.blacklist[method]; ok { +func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.methods == nil { - l.methods = make(map[string]*methodLoggerConfig) + if l.config.Methods == nil { + l.config.Methods = make(map[string]*MethodLoggerConfig) } - l.methods[method] = ml + l.config.Methods[method] = ml return nil } // Set blacklist method for "-service/method". func (l *logger) setBlacklist(method string) error { - if _, ok := l.blacklist[method]; ok { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.blacklist == nil { - l.blacklist = make(map[string]struct{}) + if l.config.Blacklist == nil { + l.config.Blacklist = make(map[string]struct{}) } - l.blacklist[method] = struct{}{} + l.config.Blacklist[method] = struct{}{} return nil } @@ -148,23 +167,23 @@ func (l *logger) setBlacklist(method string) error { // // Each methodLogger returned by this method is a new instance. 
This is to // generate sequence id within the call. -func (l *logger) getMethodLogger(methodName string) *MethodLogger { +func (l *logger) GetMethodLogger(methodName string) MethodLogger { s, m, err := grpcutil.ParseMethod(methodName) if err != nil { grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) return nil } - if ml, ok := l.methods[s+"/"+m]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Methods[s+"/"+m]; ok { + return newMethodLogger(ml.Header, ml.Message) } - if _, ok := l.blacklist[s+"/"+m]; ok { + if _, ok := l.config.Blacklist[s+"/"+m]; ok { return nil } - if ml, ok := l.services[s]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Services[s]; ok { + return newMethodLogger(ml.Header, ml.Message) } - if l.all == nil { + if l.config.All == nil { return nil } - return newMethodLogger(l.all.hdr, l.all.msg) + return newMethodLogger(l.config.All.Header, l.config.All.Message) } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index d8f4e7602f..ab589a76bf 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -89,7 +89,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { if err != nil { return fmt.Errorf("invalid config: %q, %v", config, err) } - if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } return nil @@ -104,11 +104,11 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) } if m == "*" { - if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } else { - if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 0cdb418315..24df0a1a0c 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -48,7 +48,11 @@ func (g *callIDGenerator) reset() { var idGen callIDGenerator // MethodLogger is the sub-logger for each method. -type MethodLogger struct { +type MethodLogger interface { + Log(LogEntryConfig) +} + +type methodLogger struct { headerMaxLen, messageMaxLen uint64 callID uint64 @@ -57,8 +61,8 @@ type MethodLogger struct { sink Sink // TODO(blog): make this plugable. } -func newMethodLogger(h, m uint64) *MethodLogger { - return &MethodLogger{ +func newMethodLogger(h, m uint64) *methodLogger { + return &methodLogger{ headerMaxLen: h, messageMaxLen: m, @@ -69,8 +73,10 @@ func newMethodLogger(h, m uint64) *MethodLogger { } } -// Log creates a proto binary log entry, and logs it to the sink. 
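Exporting LoggerConfig and MethodLoggerConfig above lets a binary logger be built programmatically instead of parsed from the GRPC_BINARY_LOG_FILTER-style config string; roughly, using only identifiers present in this diff:

cfg := binarylog.LoggerConfig{
	// Truncate headers at 1 KiB and messages at 4 KiB for all methods.
	All: &binarylog.MethodLoggerConfig{Header: 1024, Message: 4096},
}
l := binarylog.NewLoggerFromConfig(cfg)
if ml := l.GetMethodLogger("/pkg.Service/Method"); ml != nil {
	// Each call returns a fresh MethodLogger so sequence ids stay per-RPC.
	_ = ml
}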
-func (ml *MethodLogger) Log(c LogEntryConfig) { +// Build is an internal only method for building the proto message out of the +// input event. It's made public to enable other library to reuse as much logic +// in methodLogger as possible. +func (ml *methodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp @@ -85,11 +91,15 @@ func (ml *MethodLogger) Log(c LogEntryConfig) { case *pb.GrpcLogEntry_Message: m.PayloadTruncated = ml.truncateMessage(pay.Message) } + return m +} - ml.sink.Write(m) +// Log creates a proto binary log entry, and logs it to the sink. +func (ml *methodLogger) Log(c LogEntryConfig) { + ml.sink.Write(ml.Build(c)) } -func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { +func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } @@ -119,7 +129,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { return truncated } -func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { +func (ml *methodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index cd1807543e..777cbcd792 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,6 +24,8 @@ package channelz import ( + "context" + "errors" "fmt" "sort" "sync" @@ -49,7 +51,8 @@ var ( // TurnOn turns on channelz data collection. func TurnOn() { if !IsOn() { - NewChannelzStorage() + db.set(newChannelMap()) + idGen.reset() atomic.StoreInt32(&curState, 1) } } @@ -94,46 +97,40 @@ func (d *dbWrapper) get() *channelMap { return d.DB } -// NewChannelzStorage initializes channelz data storage and id generator. +// NewChannelzStorageForTesting initializes channelz data storage and id +// generator for testing purposes. // -// This function returns a cleanup function to wait for all channelz state to be reset by the -// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests -// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen -// to remove some entity just register by the new test, since the id space is the same. -// -// Note: This function is exported for testing purpose only. User should not call -// it in most cases. -func NewChannelzStorage() (cleanup func() error) { - db.set(&channelMap{ - topLevelChannels: make(map[int64]struct{}), - channels: make(map[int64]*channel), - listenSockets: make(map[int64]*listenSocket), - normalSockets: make(map[int64]*normalSocket), - servers: make(map[int64]*server), - subChannels: make(map[int64]*subChannel), - }) +// Returns a cleanup function to be invoked by the test, which waits for up to +// 10s for all channelz state to be reset by the grpc goroutines when those +// entities get closed. This cleanup function helps with ensuring that tests +// don't mess up each other. 
+func NewChannelzStorageForTesting() (cleanup func() error) { + db.set(newChannelMap()) idGen.reset() + return func() error { - var err error cm := db.get() if cm == nil { return nil } - for i := 0; i < 1000; i++ { - cm.mu.Lock() - if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 { - cm.mu.Unlock() - // all things stored in the channelz map have been cleared. + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + for { + cm.mu.RLock() + topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) + cm.mu.RUnlock() + + if err := ctx.Err(); err != nil { + return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) + } + if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { return nil } - cm.mu.Unlock() - time.Sleep(10 * time.Millisecond) + <-ticker.C } - - cm.mu.Lock() - err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) - cm.mu.Unlock() - return err } } @@ -188,54 +185,77 @@ func GetServer(id int64) *ServerMetric { return db.get().GetServer(id) } -// RegisterChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). pid = 0 means no parent. It returns the unique channelz tracking id -// assigned to this channel. -func RegisterChannel(c Channel, pid int64, ref string) int64 { +// RegisterChannel registers the given channel c in the channelz database with +// ref as its reference name, and adds it to the child list of its parent +// (identified by pid). pid == nil means no parent. +// +// Returns a unique channelz identifier assigned to this channel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { id := idGen.genID() + var parent int64 + isTopChannel := true + if pid != nil { + isTopChannel = false + parent = pid.Int() + } + + if !IsOn() { + return newIdentifer(RefChannel, id, pid) + } + cn := &channel{ refName: ref, c: c, subChans: make(map[int64]string), nestedChans: make(map[int64]string), id: id, - pid: pid, + pid: parent, trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - if pid == 0 { - db.get().addChannel(id, cn, true, pid) - } else { - db.get().addChannel(id, cn, false, pid) - } - return id + db.get().addChannel(id, cn, isTopChannel, parent) + return newIdentifer(RefChannel, id, pid) } -// RegisterSubChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). It returns the unique channelz tracking id assigned to this subchannel. 
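The cleanup rewrite above swaps a fixed 1000-iteration sleep loop for a context deadline plus ticker, and takes RLock instead of Lock while polling. The same shape generalizes to any bounded wait (illustrative helper, not in the patch):

// waitUntil polls cond every 10ms until it holds or timeout elapses.
func waitUntil(cond func() bool, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	for {
		if cond() {
			return nil
		}
		if err := ctx.Err(); err != nil {
			return err // deadline hit before the condition held
		}
		<-ticker.C
	}
}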
-func RegisterSubChannel(c Channel, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a SubChannel's parent id cannot be 0") - return 0 +// RegisterSubChannel registers the given subChannel c in the channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by pid). +// +// Returns a unique channelz identifier assigned to this subChannel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a SubChannel's parent id cannot be nil") } id := idGen.genID() + if !IsOn() { + return newIdentifer(RefSubChannel, id, pid), nil + } + sc := &subChannel{ refName: ref, c: c, sockets: make(map[int64]string), id: id, - pid: pid, + pid: pid.Int(), trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - db.get().addSubChannel(id, sc, pid) - return id + db.get().addSubChannel(id, sc, pid.Int()) + return newIdentifer(RefSubChannel, id, pid), nil } // RegisterServer registers the given server s in channelz database. It returns // the unique channelz tracking id assigned to this server. -func RegisterServer(s Server, ref string) int64 { +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterServer(s Server, ref string) *Identifier { id := idGen.genID() + if !IsOn() { + return newIdentifer(RefServer, id, nil) + } + svr := &server{ refName: ref, s: s, @@ -244,71 +264,92 @@ func RegisterServer(s Server, ref string) int64 { id: id, } db.get().addServer(id, svr) - return id + return newIdentifer(RefServer, id, nil) } // RegisterListenSocket registers the given listen socket s in channelz database // with ref as its reference name, and add it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this listen socket. -func RegisterListenSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a ListenSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a ListenSocket's parent id cannot be 0") } id := idGen.genID() - ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addListenSocket(id, ls, pid) - return id + if !IsOn() { + return newIdentifer(RefListenSocket, id, pid), nil + } + + ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addListenSocket(id, ls, pid.Int()) + return newIdentifer(RefListenSocket, id, pid), nil } // RegisterNormalSocket registers the given normal socket s in channelz database -// with ref as its reference name, and add it to the child list of its parent +// with ref as its reference name, and adds it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this normal socket. -func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a NormalSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. 
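The practical effect of migrating from int64 ids to *Identifier, for call sites inside grpc (a sketch; the channelz.Server and channelz.Socket implementations are elided):

srvID := channelz.RegisterServer(srv, "grpc-server") // valid even when channelz is off
lsID, err := channelz.RegisterListenSocket(lis, srvID, "tcp-listener")
if err != nil {
	return err // a nil parent is now a returned error, not a logged zero id
}
defer channelz.RemoveEntry(lsID) // no-op while channelz is off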
+func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a NormalSocket's parent id cannot be 0") } id := idGen.genID() - ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addNormalSocket(id, ns, pid) - return id + if !IsOn() { + return newIdentifer(RefNormalSocket, id, pid), nil + } + + ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addNormalSocket(id, ns, pid.Int()) + return newIdentifer(RefNormalSocket, id, pid), nil } // RemoveEntry removes an entry with unique channelz tracking id to be id from // channelz database. -func RemoveEntry(id int64) { - db.get().removeEntry(id) +// +// If channelz is not turned ON, this function is a no-op. +func RemoveEntry(id *Identifier) { + if !IsOn() { + return + } + db.get().removeEntry(id.Int()) } -// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added -// to the channel trace. -// The Parent field is optional. It is used for event that will be recorded in the entity's parent -// trace also. +// TraceEventDesc is what the caller of AddTraceEvent should provide to describe +// the event to be added to the channel trace. +// +// The Parent field is optional. It is used for an event that will be recorded +// in the entity's parent trace. type TraceEventDesc struct { Desc string Severity Severity Parent *TraceEventDesc } -// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. -func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) { - for d := desc; d != nil; d = d.Parent { - switch d.Severity { - case CtUnknown, CtInfo: - l.InfoDepth(depth+1, d.Desc) - case CtWarning: - l.WarningDepth(depth+1, d.Desc) - case CtError: - l.ErrorDepth(depth+1, d.Desc) - } +// AddTraceEvent adds trace related to the entity with specified id, using the +// provided TraceEventDesc. +// +// If channelz is not turned ON, this will simply log the event descriptions. +func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) { + // Log only the trace description associated with the bottom most entity. + switch desc.Severity { + case CtUnknown, CtInfo: + l.InfoDepth(depth+1, withParens(id)+desc.Desc) + case CtWarning: + l.WarningDepth(depth+1, withParens(id)+desc.Desc) + case CtError: + l.ErrorDepth(depth+1, withParens(id)+desc.Desc) } + if getMaxTraceEntry() == 0 { return } - db.get().traceEvent(id, desc) + if IsOn() { + db.get().traceEvent(id.Int(), desc) + } } // channelMap is the storage data structure for channelz. @@ -326,6 +367,17 @@ type channelMap struct { normalSockets map[int64]*normalSocket } +func newChannelMap() *channelMap { + return &channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*channel), + listenSockets: make(map[int64]*listenSocket), + normalSockets: make(map[int64]*normalSocket), + servers: make(map[int64]*server), + subChannels: make(map[int64]*subChannel), + } +} + func (c *channelMap) addServer(id int64, s *server) { c.mu.Lock() s.cm = c diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go new file mode 100644 index 0000000000..c9a27acd37 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/id.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import "fmt" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier struct { + typ RefChannelType + id int64 + str string + pid *Identifier +} + +// Type returns the entity type corresponding to id. +func (id *Identifier) Type() RefChannelType { + return id.typ +} + +// Int returns the integer identifier corresponding to id. +func (id *Identifier) Int() int64 { + return id.id +} + +// String returns a string representation of the entity corresponding to id. +// +// This includes some information about the parent as well. Examples: +// Top-level channel: [Channel #channel-number] +// Nested channel: [Channel #parent-channel-number Channel #channel-number] +// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] +func (id *Identifier) String() string { + return id.str +} + +// Equal returns true if other is the same as id. +func (id *Identifier) Equal(other *Identifier) bool { + if (id != nil) != (other != nil) { + return false + } + if id == nil && other == nil { + return true + } + return id.typ == other.typ && id.id == other.id && id.pid == other.pid +} + +// NewIdentifierForTesting returns a new opaque identifier to be used only for +// testing purposes. +func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { + return newIdentifer(typ, id, pid) +} + +func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { + str := fmt.Sprintf("%s #%d", typ, id) + if pid != nil { + str = fmt.Sprintf("%s %s", pid, str) + } + return &Identifier{typ: typ, id: id, str: str, pid: pid} +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index b0013f9c88..8e13a3d2ce 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -26,77 +26,54 @@ import ( var logger = grpclog.Component("channelz") +func withParens(id *Identifier) string { + return "[" + id.String() + "] " +} + // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, args...) - } +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) 
- if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, msg) - } +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtInfo, + }) } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, args...) - } +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, msg) - } +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtWarning, + }) } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtError, - }) - } else { - l.ErrorDepth(1, args...) - } +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) } // Errorf logs and adds a trace event if channelz is on. -func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtError, - }) - } else { - l.ErrorDepth(1, msg) - } +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtError, + }) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 3c595d154b..ad0ce4dabf 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -686,12 +686,33 @@ const ( type RefChannelType int const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota // RefChannel indicates the referenced entity is a Channel. - RefChannel RefChannelType = iota + RefChannel // RefSubChannel indicates the referenced entity is a SubChannel. RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. 
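For reference, the Identifier strings these logging helpers now prepend compose parent-first, per id.go above:

top := channelz.NewIdentifierForTesting(channelz.RefChannel, 1, nil)
sub := channelz.NewIdentifierForTesting(channelz.RefSubChannel, 5, top)
fmt.Println(sub) // Channel #1 SubChannel #5
// withParens then renders "[Channel #1 SubChannel #5] " in log output.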
+ RefNormalSocket ) +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + func (c *channelTrace) dumpData() *ChannelTrace { c.mu.Lock() ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 93522d716d..55aaeea8b4 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -26,13 +26,13 @@ import ( const ( // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. // Do not use this and read from env directly. Its value is read and kept in - // variable BootstrapFileName. + // variable XDSBootstrapFileName. // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" - // XDSBootstrapFileContentEnv is the env variable to set bootstrapp file + // XDSBootstrapFileContentEnv is the env variable to set bootstrap file // content. Do not use this and read from env directly. Its value is read - // and kept in variable BootstrapFileName. + // and kept in variable XDSBootstrapFileContent. // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" @@ -41,7 +41,9 @@ const ( clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" + outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" + rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB" c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" ) @@ -75,16 +77,25 @@ var ( // environment variable // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to // "true". - XDSAggregateAndDNS = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true") + XDSAggregateAndDNS = !strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "false") // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, // which can be disabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") - + // XDSOutlierDetection indicates whether outlier detection support is + // enabled, which can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "true". + XDSOutlierDetection = strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "true") // XDSFederation indicates whether federation support is enabled. XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") + // XDSRLS indicates whether processing of Cluster Specifier plugins and + // support for the RLS CLuster Specifier is enabled, which can be enabled by + // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to + // "true". + XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true") + // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. 
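The convention in this envconfig hunk: features that default to enabled use an opt-out comparison against "false", while features that default to disabled use an opt-in comparison against "true". Note the hunk also flips XDSAggregateAndDNS from opt-in to opt-out, so it is now on by default. In pattern form:

// Default-on: only an explicit "false" disables the feature.
rbacOn := !strings.EqualFold(os.Getenv("GRPC_XDS_EXPERIMENTAL_RBAC"), "false")
// Default-off: only an explicit "true" enables the feature.
odOn := strings.EqualFold(os.Getenv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION"), "true")
_, _ = rbacOn, odOn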
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv) ) diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go index d6c9e03fc4..6717b757f8 100644 --- a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go +++ b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go @@ -20,13 +20,6 @@ package googlecloud import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "regexp" "runtime" "strings" "sync" @@ -35,43 +28,9 @@ import ( internalgrpclog "google.golang.org/grpc/internal/grpclog" ) -const ( - linuxProductNameFile = "/sys/class/dmi/id/product_name" - windowsCheckCommand = "powershell.exe" - windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" - powershellOutputFilter = "Manufacturer" - windowsManufacturerRegex = ":(.*)" - - logPrefix = "[googlecloud]" -) +const logPrefix = "[googlecloud]" var ( - // The following two variables will be reassigned in tests. - runningOS = runtime.GOOS - manufacturerReader = func() (io.Reader, error) { - switch runningOS { - case "linux": - return os.Open(linuxProductNameFile) - case "windows": - cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) - out, err := cmd.Output() - if err != nil { - return nil, err - } - for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { - if strings.HasPrefix(line, powershellOutputFilter) { - re := regexp.MustCompile(windowsManufacturerRegex) - name := re.FindString(line) - name = strings.TrimLeft(name, ":") - return strings.NewReader(name), nil - } - } - return nil, errors.New("cannot determine the machine's manufacturer") - default: - return nil, fmt.Errorf("%s is not supported", runningOS) - } - } - vmOnGCEOnce sync.Once vmOnGCE bool @@ -84,21 +43,21 @@ var ( // package. We keep this to avoid depending on the cloud library module. func OnGCE() bool { vmOnGCEOnce.Do(func() { - vmOnGCE = isRunningOnGCE() + mf, err := manufacturer() + if err != nil { + logger.Infof("failed to read manufacturer, setting onGCE=false: %v") + return + } + vmOnGCE = isRunningOnGCE(mf, runtime.GOOS) }) return vmOnGCE } -// isRunningOnGCE checks whether the local system, without doing a network request is +// isRunningOnGCE checks whether the local system, without doing a network request, is // running on GCP. 
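Two observations on the googlecloud refactor above. First, the new logger.Infof call supplies a %v verb but drops its err argument, which go vet will flag. Second, splitting manufacturer() into per-GOOS files turns isRunningOnGCE into a pure function of (manufacturer, goos), making it table-testable, as in this sketch (assuming the windows branch keeps upstream's Google-manufacturer check):

func TestIsRunningOnGCE(t *testing.T) {
	if !isRunningOnGCE([]byte("Google Compute Engine\n"), "linux") {
		t.Error("trimmed linux DMI product name should be recognized")
	}
	if isRunningOnGCE([]byte("Acme Corp"), "windows") {
		t.Error("non-Google manufacturer should not be treated as GCE")
	}
}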
-func isRunningOnGCE() bool { - manufacturer, err := readManufacturer() - if err != nil { - logger.Infof("failed to read manufacturer %v, returning OnGCE=false", err) - return false - } +func isRunningOnGCE(manufacturer []byte, goos string) bool { name := string(manufacturer) - switch runningOS { + switch goos { case "linux": name = strings.TrimSpace(name) return name == "Google" || name == "Google Compute Engine" @@ -111,18 +70,3 @@ func isRunningOnGCE() bool { return false } } - -func readManufacturer() ([]byte, error) { - reader, err := manufacturerReader() - if err != nil { - return nil, err - } - if reader == nil { - return nil, errors.New("got nil reader") - } - manufacturer, err := ioutil.ReadAll(reader) - if err != nil { - return nil, fmt.Errorf("failed reading %v: %v", linuxProductNameFile, err) - } - return manufacturer, nil -} diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go new file mode 100644 index 0000000000..ffa0f1ddee --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go @@ -0,0 +1,26 @@ +//go:build !(linux || windows) +// +build !linux,!windows + +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package googlecloud + +func manufacturer() ([]byte, error) { + return nil, nil +} diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go new file mode 100644 index 0000000000..e53b8ffc83 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go @@ -0,0 +1,27 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package googlecloud + +import "io/ioutil" + +const linuxProductNameFile = "/sys/class/dmi/id/product_name" + +func manufacturer() ([]byte, error) { + return ioutil.ReadFile(linuxProductNameFile) +} diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go new file mode 100644 index 0000000000..2d7aaaaa70 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package googlecloud + +import ( + "errors" + "os/exec" + "regexp" + "strings" +) + +const ( + windowsCheckCommand = "powershell.exe" + windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" + powershellOutputFilter = "Manufacturer" + windowsManufacturerRegex = ":(.*)" +) + +func manufacturer() ([]byte, error) { + cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) + out, err := cmd.Output() + if err != nil { + return nil, err + } + for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { + if strings.HasPrefix(line, powershellOutputFilter) { + re := regexp.MustCompile(windowsManufacturerRegex) + name := re.FindString(line) + name = strings.TrimLeft(name, ":") + return []byte(name), nil + } + } + return nil, errors.New("cannot determine the machine's manufacturer") +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index e6f975cbf6..30a3b4258f 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -115,12 +115,12 @@ type LoggerV2 interface { // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. type DepthLoggerV2 interface { - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...interface{}) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. WarningDepth(depth int, args ...interface{}) - // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. ErrorDepth(depth int, args ...interface{}) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...interface{}) } diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/regex.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go index 2810a8ba2f..7a092b2b80 100644 --- a/vendor/google.golang.org/grpc/internal/grpcutil/regex.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go @@ -20,9 +20,12 @@ package grpcutil import "regexp" -// FullMatchWithRegex returns whether the full string matches the regex provided. -func FullMatchWithRegex(re *regexp.Regexp, string string) bool { +// FullMatchWithRegex returns whether the full text matches the regex provided. 
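On the FullMatchWithRegex change below: with Longest() set, the leftmost match is also the longest possible one, so a full match holds exactly when that match spans the whole input. The new empty-input branch is needed because FindString returns "" both for an empty match and for no match at all. For example:

re := regexp.MustCompile("a|ab")
fmt.Println(grpcutil.FullMatchWithRegex(re, "ab"))  // true: longest match "ab" covers the text
fmt.Println(grpcutil.FullMatchWithRegex(re, "abc")) // false: match "ab" is shorter than the text
empty := regexp.MustCompile("b+")
fmt.Println(grpcutil.FullMatchWithRegex(empty, "")) // false: "b+" cannot match empty input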
+func FullMatchWithRegex(re *regexp.Regexp, text string) bool {
+	if len(text) == 0 {
+		return re.MatchString(text)
+	}
 	re.Longest()
-	rem := re.FindString(string)
-	return len(rem) == len(string)
+	rem := re.FindString(text)
+	return len(rem) == len(text)
 }
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 1b596bf357..83018be7c7 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -38,11 +38,10 @@ var (
 	// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
 	// default, but tests may wish to set it lower for convenience.
 	KeepaliveMinPingTime = 10 * time.Second
-	// ParseServiceConfigForTesting is for creating a fake
-	// ClientConn for resolver testing only
-	ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult
+	// ParseServiceConfig parses a JSON representation of the service config.
+	ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult
 	// EqualServiceConfigForTesting is for testing service config generation and
-	// parsing. Both a and b should be returned by ParseServiceConfigForTesting.
+	// parsing. Both a and b should be returned by ParseServiceConfig.
 	// This function compares the config without rawJSON stripped, in case
 	// there's a difference in white space.
 	EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
@@ -64,6 +63,76 @@ var (
 	// xDS-enabled server invokes this method on a grpc.Server when a particular
 	// listener moves to "not-serving" mode.
 	DrainServerTransports interface{} // func(*grpc.Server, string)
+	// AddExtraServerOptions adds an array of ServerOption that will be
+	// effective globally for newly created servers. The priority will be: 1.
+	// user-provided; 2. this method; 3. default values.
+	AddExtraServerOptions interface{} // func(opt ...ServerOption)
+	// ClearExtraServerOptions clears the array of extra ServerOption. This
+	// method is useful in testing and benchmarking.
+	ClearExtraServerOptions func()
+	// AddExtraDialOptions adds an array of DialOption that will be effective
+	// globally for newly created client channels. The priority will be: 1.
+	// user-provided; 2. this method; 3. default values.
+	AddExtraDialOptions interface{} // func(opt ...DialOption)
+	// ClearExtraDialOptions clears the array of extra DialOption. This
+	// method is useful in testing and benchmarking.
+	ClearExtraDialOptions func()
+
+	// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
+	// the provided xds bootstrap config instead of the global configuration from
+	// the supported environment variables. The resolver.Builder is meant to be
+	// used in conjunction with the grpc.WithResolvers DialOption.
+	//
+	// Testing Only
+	//
+	// This function should ONLY be used for testing and may not work with some
+	// other features, including the CSDS service.
+	NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error)
+
+	// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
+	// Specifier Plugin for testing purposes, regardless of the XDSRLS environment
+	// variable.
+	//
+	// TODO: Remove this function once the RLS env var is removed.
+	RegisterRLSClusterSpecifierPluginForTesting func()
+
+	// UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster
+	// Specifier Plugin for testing purposes.
This is needed because there is no way + // to unregister the RLS Cluster Specifier Plugin after registering it solely + // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). + // + // TODO: Remove this function once the RLS env var is removed. + UnregisterRLSClusterSpecifierPluginForTesting func() + + // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing + // purposes, regardless of the RBAC environment variable. + // + // TODO: Remove this function once the RBAC env var is removed. + RegisterRBACHTTPFilterForTesting func() + + // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for + // testing purposes. This is needed because there is no way to unregister the + // HTTP Filter after registering it solely for testing purposes using + // RegisterRBACHTTPFilterForTesting(). + // + // TODO: Remove this function once the RBAC env var is removed. + UnregisterRBACHTTPFilterForTesting func() + + // RegisterOutlierDetectionBalancerForTesting registers the Outlier + // Detection Balancer for testing purposes, regardless of the Outlier + // Detection environment variable. + // + // TODO: Remove this function once the Outlier Detection env var is removed. + RegisterOutlierDetectionBalancerForTesting func() + + // UnregisterOutlierDetectionBalancerForTesting unregisters the Outlier + // Detection Balancer for testing purposes. This is needed because there is + // no way to unregister the Outlier Detection Balancer after registering it + // solely for testing purposes using + // RegisterOutlierDetectionBalancerForTesting(). + // + // TODO: Remove this function once the Outlier Detection env var is removed. + UnregisterOutlierDetectionBalancerForTesting func() ) // HealthChecker defines the signature of the client-side LB channel health checking function. @@ -86,3 +155,9 @@ const ( // that supports backend returned by grpclb balancer. CredsBundleModeBackendFromBalancer = "backend-from-balancer" ) + +// RLSLoadBalancingPolicyName is the name of the RLS LB policy. +// +// It currently has an experimental suffix which would be removed once +// end-to-end testing of the policy is completed. +const RLSLoadBalancingPolicyName = "rls_experimental" diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index b8733dbf34..b2980f8ac4 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -22,6 +22,9 @@ package metadata import ( + "fmt" + "strings" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" ) @@ -72,3 +75,46 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address { addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) return addr } + +// Validate returns an error if the input md contains invalid keys or values. +// +// If the header is not a pseudo-header, the following items are checked: +// - header names must contain one or more characters from this set [0-9 a-z _ - .]. +// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed. +// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E]. 
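Validate sits in an internal package, so applications cannot import it, but the rules listed above are easy to mirror. A rough sketch of the key check under that assumption (validKey is a hypothetical helper, not a gRPC API):

package main

import (
	"fmt"

	"google.golang.org/grpc/metadata"
)

// validKey reports whether k only uses characters from [0-9 a-z - _ .],
// the same set the internal Validate helper enforces for header names.
func validKey(k string) bool {
	for i := 0; i < len(k); i++ {
		c := k[i]
		if !('a' <= c && c <= 'z') && !('0' <= c && c <= '9') && c != '-' && c != '_' && c != '.' {
			return false
		}
	}
	return true
}

func main() {
	md := metadata.Pairs(
		"trace-id", "abc123", // lowercase key, printable ASCII value: passes
		"payload-bin", "\x00\x01", // "-bin" suffix: value bytes are exempt from the check
	)
	for k := range md {
		fmt.Printf("%q has a valid key: %v\n", k, validKey(k))
	}
}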
+func Validate(md metadata.MD) error {
+	for k, vals := range md {
+		// pseudo-headers are ignored
+		if k[0] == ':' {
+			continue
+		}
+		// check key; index bytes directly, which saves the rune conversion a range loop would do
+		for i := 0; i < len(k); i++ {
+			r := k[i]
+			if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
+				return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k)
+			}
+		}
+		if strings.HasSuffix(k, "-bin") {
+			continue
+		}
+		// check value
+		for _, val := range vals {
+			if hasNotPrintable(val) {
+				return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k)
+			}
+		}
+	}
+	return nil
+}
+
+// hasNotPrintable returns true if msg contains any characters which are not in %x20-%x7E
+func hasNotPrintable(msg string) bool {
+	// index bytes directly, which saves the rune conversion a range loop would do
+	for i := 0; i < len(msg); i++ {
+		if msg[i] < 0x20 || msg[i] > 0x7E {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
new file mode 100644
index 0000000000..0177af4b51
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
@@ -0,0 +1,82 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package pretty defines helper functions to pretty-print structs for logging.
+package pretty
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+
+	"github.com/golang/protobuf/jsonpb"
+	protov1 "github.com/golang/protobuf/proto"
+	"google.golang.org/protobuf/encoding/protojson"
+	protov2 "google.golang.org/protobuf/proto"
+)
+
+const jsonIndent = "  "
+
+// ToJSON marshals the input into a json string.
+//
+// If marshal fails, it falls back to fmt.Sprintf("%+v").
+func ToJSON(e interface{}) string {
+	switch ee := e.(type) {
+	case protov1.Message:
+		mm := jsonpb.Marshaler{Indent: jsonIndent}
+		ret, err := mm.MarshalToString(ee)
+		if err != nil {
+			// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
+			// messages are not imported, and this will fail because the message
+			// is not found.
+			return fmt.Sprintf("%+v", ee)
+		}
+		return ret
+	case protov2.Message:
+		mm := protojson.MarshalOptions{
+			Multiline: true,
+			Indent:    jsonIndent,
+		}
+		ret, err := mm.Marshal(ee)
+		if err != nil {
+			// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
+			// messages are not imported, and this will fail because the message
+			// is not found.
+			return fmt.Sprintf("%+v", ee)
+		}
+		return string(ret)
+	default:
+		ret, err := json.MarshalIndent(ee, "", jsonIndent)
+		if err != nil {
+			return fmt.Sprintf("%+v", ee)
+		}
+		return string(ret)
+	}
+}
+
+// FormatJSON formats the input json bytes with indentation.
+//
+// If Indent fails, it returns the unchanged input as string.
+func FormatJSON(b []byte) string { + var out bytes.Buffer + err := json.Indent(&out, b, "", jsonIndent) + if err != nil { + return string(b) + } + return out.String() +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 8394d252df..244f4b081d 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -137,6 +137,7 @@ type earlyAbortStream struct { streamID uint32 contentSubtype string status *status.Status + rst bool } func (*earlyAbortStream) isTransportResponseFrame() bool { return false } @@ -786,6 +787,11 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { return err } + if eas.rst { + if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil { + return err + } + } return nil } diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 1c3459c2b4..090120925b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -49,7 +49,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC // from inside an http.Handler. It requires that the http Server // supports HTTP/2. -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { if r.ProtoMajor != 2 { return nil, errors.New("gRPC requires HTTP/2") } @@ -138,7 +138,7 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across handler_server and http2_server contentSubtype string - stats stats.Handler + stats []stats.Handler } func (ht *serverHandlerTransport) Close() { @@ -228,10 +228,10 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro }) if err == nil { // transport has not been closed - if ht.stats != nil { - // Note: The trailer fields are compressed with hpack after this call returns. - // No WireLength field is set here. - ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. + for _, sh := range ht.stats { + sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } @@ -314,10 +314,10 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { }) if err == nil { - if ht.stats != nil { + for _, sh := range ht.stats { // Note: The header fields are compressed with hpack after this call returns. // No WireLength field is set here. 
- ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + sh.HandleRPC(s.Context(), &stats.OutHeader{ Header: md.Copy(), Compression: s.sendCompress, }) @@ -369,14 +369,14 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace } ctx = metadata.NewIncomingContext(ctx, ht.headerMD) s.ctx = peer.NewContext(ctx, pr) - if ht.stats != nil { - s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + for _, sh := range ht.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ FullMethod: s.method, RemoteAddr: ht.RemoteAddr(), Compression: s.recvCompress, } - ht.stats.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } s.trReader = &transportReader{ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index f0c72d3371..be371c6e0f 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -90,7 +90,7 @@ type http2Client struct { kp keepalive.ClientParameters keepaliveEnabled bool - statsHandler stats.Handler + statsHandlers []stats.Handler initialWindowSize int32 @@ -132,7 +132,7 @@ type http2Client struct { kpDormant bool // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData onGoAway func(GoAwayReason) @@ -311,7 +311,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, - statsHandler: opts.StatsHandler, + statsHandlers: opts.StatsHandlers, initialWindowSize: initialWindowSize, onPrefaceReceipt: onPrefaceReceipt, nextID: 1, @@ -341,18 +341,19 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts updateFlowControl: t.updateFlowControl, } } - if t.statsHandler != nil { - t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + for _, sh := range t.statsHandlers { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{ Client: true, } - t.statsHandler.HandleConn(t.ctx, connBegin) + sh.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + if err != nil { + return nil, err } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) @@ -630,8 +631,8 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call // the wire. However, there are two notable exceptions: // // 1. If the stream headers violate the max header list size allowed by the -// server. In this case there is no reason to retry at all, as it is -// assumed the RPC would continue to fail on subsequent attempts. +// server. It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. // 2. If the credentials errored when requesting their headers. 
In this case, // it's possible a retry can fix the problem, but indefinitely transparently // retrying is not appropriate as it is likely the credentials, if they can @@ -639,8 +640,7 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call type NewStreamError struct { Err error - DoNotRetry bool - DoNotTransparentRetry bool + AllowTransparentRetry bool } func (e NewStreamError) Error() string { @@ -649,11 +649,11 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - return nil, &NewStreamError{Err: err, DoNotTransparentRetry: true} + return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -753,13 +753,14 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return true }, hdr) if err != nil { - return nil, &NewStreamError{Err: err} + // Connection closed. + return nil, &NewStreamError{Err: err, AllowTransparentRetry: true} } if success { break } if hdrListSizeErr != nil { - return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true} + return nil, &NewStreamError{Err: hdrListSizeErr} } firstTry = false select { @@ -767,29 +768,32 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea case <-ctx.Done(): return nil, &NewStreamError{Err: ContextErr(ctx.Err())} case <-t.goAway: - return nil, &NewStreamError{Err: errStreamDrain} + return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true} case <-t.ctx.Done(): - return nil, &NewStreamError{Err: ErrConnClosing} + return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} } } - if t.statsHandler != nil { + if len(t.statsHandlers) != 0 { header, ok := metadata.FromOutgoingContext(ctx) if ok { header.Set("user-agent", t.userAgent) } else { header = metadata.Pairs("user-agent", t.userAgent) } - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. - outHeader := &stats.OutHeader{ - Client: true, - FullMethod: callHdr.Method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: callHdr.SendCompress, - Header: header, + for _, sh := range t.statsHandlers { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + // Note: Creating a new stats object to prevent pollution. + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + } + sh.HandleRPC(s.ctx, outHeader) } - t.statsHandler.HandleRPC(s.ctx, outHeader) } return s, nil } @@ -898,9 +902,7 @@ func (t *http2Client) Close(err error) { t.controlBuf.finish() t.cancel() t.conn.Close() - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) - } + channelz.RemoveEntry(t.channelzID) // Append info about previous goaways if there were any, since this may be important // for understanding the root cause for this connection to be closed. 
_, goAwayDebugMessage := t.GetGoAwayReason() @@ -917,11 +919,11 @@ func (t *http2Client) Close(err error) { for _, s := range streams { t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } - if t.statsHandler != nil { + for _, sh := range t.statsHandlers { connEnd := &stats.ConnEnd{ Client: true, } - t.statsHandler.HandleConn(t.ctx, connEnd) + sh.HandleConn(t.ctx, connEnd) } } @@ -1433,7 +1435,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { close(s.headerChan) } - if t.statsHandler != nil { + for _, sh := range t.statsHandlers { if isHeader { inHeader := &stats.InHeader{ Client: true, @@ -1441,14 +1443,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { Header: metadata.MD(mdata).Copy(), Compression: s.recvCompress, } - t.statsHandler.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } else { inTrailer := &stats.InTrailer{ Client: true, WireLength: int(frame.Header().Length), Trailer: metadata.MD(mdata).Copy(), } - t.statsHandler.HandleRPC(s.ctx, inTrailer) + sh.HandleRPC(s.ctx, inTrailer) } } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 2c6eaf0e59..2b0fde334c 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -21,7 +21,6 @@ package transport import ( "bytes" "context" - "errors" "fmt" "io" "math" @@ -36,6 +35,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -52,10 +52,10 @@ import ( var ( // ErrIllegalHeaderWrite indicates that setting header is illegal because of // the stream's state. - ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times") // ErrHeaderListSizeLimitViolation indicates that the header list size is larger // than the limit set by peer. - ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") + ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer") ) // serverConnectionCounter counts the number of connections a server has seen @@ -82,7 +82,7 @@ type http2Server struct { // updates, reset streams, and various settings) to the controller. controlBuf *controlBuffer fc *trInFlow - stats stats.Handler + stats []stats.Handler // Keepalive and max-age parameters for the server. kp keepalive.ServerParameters // Keepalive enforcement policy. @@ -117,7 +117,7 @@ type http2Server struct { idle time.Time // Fields below are for channelz metric collection. 
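Turning the singular stats field into a slice is the transport half of supporting several stats handlers at once; with this version vendored, repeating grpc.StatsHandler on the server (or grpc.WithStatsHandler on the client) should accumulate handlers rather than keep only the last one. A sketch with a minimal logging handler (logHandler and the option usage are illustrative):

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// logHandler is a minimal stats.Handler that only logs event types.
type logHandler struct{ name string }

func (h *logHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
	return ctx
}
func (h *logHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
	log.Printf("%s: rpc event %T", h.name, s)
}
func (h *logHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx
}
func (h *logHandler) HandleConn(_ context.Context, s stats.ConnStats) {
	log.Printf("%s: conn event %T", h.name, s)
}

func main() {
	// Both handlers see every event; previously a second StatsHandler
	// option overwrote the first.
	_ = grpc.NewServer(
		grpc.StatsHandler(&logHandler{name: "metrics"}),
		grpc.StatsHandler(&logHandler{name: "tracing"}),
	)
}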
- channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData bufferPool *bufferPool @@ -231,6 +231,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if kp.Timeout == 0 { kp.Timeout = defaultServerKeepaliveTimeout } + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + } kep := config.KeepalivePolicy if kep.MinTime == 0 { kep.MinTime = defaultKeepalivePolicyMinTime @@ -252,7 +257,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, activeStreams: make(map[uint32]*Stream), - stats: config.StatsHandler, + stats: config.StatsHandlers, kp: kp, idle: time.Now(), kep: kep, @@ -267,20 +272,20 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, updateFlowControl: t.updateFlowControl, } } - if t.stats != nil { - t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ + for _, sh := range t.stats { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{} - t.stats.HandleConn(t.ctx, connBegin) + sh.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + if err != nil { + return nil, err } t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) - t.framer.writer.Flush() defer func() { @@ -443,6 +448,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( streamID: streamID, contentSubtype: s.contentSubtype, status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), }) return false } @@ -516,14 +522,16 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if httpMethod != http.MethodPost { t.mu.Unlock() + errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) if logger.V(logLevel) { - logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) + logger.Infof("transport: %v", errMsg) } - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: http2.ErrCodeProtocol, - onWrite: func() {}, + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 405, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), }) s.cancel() return false @@ -544,6 +552,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( streamID: s.id, contentSubtype: s.contentSubtype, status: stat, + rst: !frame.StreamEnded(), }) return false } @@ -561,8 +570,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( t.adjustWindow(s, uint32(n)) } s.ctx = traceCtx(s.ctx, s.method) - if t.stats != nil { - s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + for _, sh := range t.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ FullMethod: s.method, RemoteAddr: t.remoteAddr, @@ -571,7 +580,7 @@ func (t *http2Server) operateHeaders(frame 
*http2.MetaHeadersFrame, handle func( WireLength: int(frame.Header().Length), Header: metadata.MD(mdata).Copy(), } - t.stats.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } s.ctxDone = s.ctx.Done() s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) @@ -925,11 +934,25 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { return true } +func (t *http2Server) streamContextErr(s *Stream) error { + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) +} + // WriteHeader sends the header metadata md back to the client. func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - if s.updateHeaderSent() || s.getState() == streamDone { + if s.updateHeaderSent() { return ErrIllegalHeaderWrite } + + if s.getState() == streamDone { + return t.streamContextErr(s) + } + s.hdrMu.Lock() if md.Len() > 0 { if s.header.Len() > 0 { @@ -940,7 +963,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } if err := t.writeHeaderLocked(s); err != nil { s.hdrMu.Unlock() - return err + return status.Convert(err).Err() } s.hdrMu.Unlock() return nil @@ -973,14 +996,14 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { t.closeStream(s, true, http2.ErrCodeInternal, false) return ErrHeaderListSizeLimitViolation } - if t.stats != nil { + for _, sh := range t.stats { // Note: Headers are compressed with hpack after this call returns. // No WireLength field is set here. outHeader := &stats.OutHeader{ Header: s.header.Copy(), Compression: s.sendCompress, } - t.stats.HandleRPC(s.Context(), outHeader) + sh.HandleRPC(s.Context(), outHeader) } return nil } @@ -1041,10 +1064,10 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Send a RST_STREAM after the trailers if the client has not already half-closed. rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) - if t.stats != nil { + for _, sh := range t.stats { // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. - t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } @@ -1056,23 +1079,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // TODO(mmukhi, dfawley): Make sure this is the right code to return. - return status.Errorf(codes.Internal, "transport: %v", err) + return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { - // TODO(mmukhi, dfawley): Should the server write also return io.EOF? 
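Because the sentinel errors above are now status errors, and writeHeaderLocked failures pass through status.Convert, server code can branch on a gRPC code instead of comparing against exported error values. A standalone illustration of that conversion:

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// A plain error converts to an Unknown status...
	plain := errors.New("transport: something broke")
	fmt.Println(status.Convert(plain).Code()) // Unknown

	// ...while a status error keeps its original code, so the Internal
	// code of the new ErrIllegalHeaderWrite survives the round trip.
	st := status.Error(codes.Internal, "transport: SendHeader called multiple times")
	fmt.Println(status.Convert(st).Code()) // Internal
}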
- s.cancel() - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } } df := &dataFrame{ @@ -1082,12 +1094,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e onEachWrite: t.setResetPingStrikes, } if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } return t.controlBuf.put(df) } @@ -1210,25 +1217,19 @@ func (t *http2Server) Close() { if err := t.conn.Close(); err != nil && logger.V(logLevel) { logger.Infof("transport: error closing conn during Close: %v", err) } - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) - } + channelz.RemoveEntry(t.channelzID) // Cancel all active streams. for _, s := range streams { s.cancel() } - if t.stats != nil { + for _, sh := range t.stats { connEnd := &stats.ConnEnd{} - t.stats.HandleConn(t.ctx, connEnd) + sh.HandleConn(t.ctx, connEnd) } } // deleteStream deletes the stream s from transport's active streams. func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1250,6 +1251,11 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { // finishStream closes the stream and puts the trailing headerFrame into controlbuf. func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + oldState := s.swapState(streamDone) if oldState == streamDone { // If the stream was already done, return. @@ -1269,6 +1275,11 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h // closeStream clears the footprint of a stream when the stream is not needed any more. func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. 
+	s.cancel()
+
 	s.swapState(streamDone)
 	t.deleteStream(s, eosReceived)
 
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index d8247bcdf6..b775130686 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -322,8 +322,6 @@ type bufWriter struct {
 	batchSize int
 	conn      net.Conn
 	err       error
-
-	onFlush func()
 }
 
 func newBufWriter(conn net.Conn, batchSize int) *bufWriter {
@@ -360,9 +358,6 @@ func (w *bufWriter) Flush() error {
 	if w.offset == 0 {
 		return nil
 	}
-	if w.onFlush != nil {
-		w.onFlush()
-	}
 	_, w.err = w.conn.Write(w.buf[:w.offset])
 	w.offset = 0
 	return w.err
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index d3bf65b2bd..6c3ba85159 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -34,6 +34,7 @@ import (
 
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/resolver"
@@ -522,14 +523,14 @@ type ServerConfig struct {
 	ConnectionTimeout     time.Duration
 	Credentials           credentials.TransportCredentials
 	InTapHandle           tap.ServerInHandle
-	StatsHandler          stats.Handler
+	StatsHandlers         []stats.Handler
 	KeepaliveParams       keepalive.ServerParameters
 	KeepalivePolicy       keepalive.EnforcementPolicy
 	InitialWindowSize     int32
 	InitialConnWindowSize int32
 	WriteBufferSize       int
 	ReadBufferSize        int
-	ChannelzParentID      int64
+	ChannelzParentID      *channelz.Identifier
 	MaxHeaderListSize     *uint32
 	HeaderTableSize       *uint32
 }
@@ -552,8 +553,8 @@ type ConnectOptions struct {
 	CredsBundle credentials.Bundle
 	// KeepaliveParams stores the keepalive parameters.
 	KeepaliveParams keepalive.ClientParameters
-	// StatsHandler stores the handler for stats.
-	StatsHandler stats.Handler
+	// StatsHandlers stores the handlers for stats.
+	StatsHandlers []stats.Handler
 	// InitialWindowSize sets the initial window size for a stream.
 	InitialWindowSize int32
 	// InitialConnWindowSize sets the initial window size for a connection.
@@ -563,7 +564,7 @@ type ConnectOptions struct {
 	// ReadBufferSize sets the size of the read buffer, which in turn determines how much data can be read at most for one read syscall.
 	ReadBufferSize int
 	// ChannelzParentID sets the addrConn id which initiated the creation of this client transport.
-	ChannelzParentID int64
+	ChannelzParentID *channelz.Identifier
 	// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
 	MaxHeaderListSize *uint32
 	// UseProxy specifies if a proxy should be used.
@@ -741,6 +742,12 @@ func (e ConnectionError) Origin() error {
 	return e.err
 }
 
+// Unwrap returns the original error of this connection error or nil when the
+// origin is nil.
+func (e ConnectionError) Unwrap() error {
+	return e.err
+}
+
 var (
 	// ErrConnClosing indicates that the transport is closing.
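The new Unwrap lets ConnectionError participate in errors.Is and errors.As chains. ConnectionError itself lives in the internal transport package, so this sketch uses a stand-in type (connError) to show the effect:

package main

import (
	"errors"
	"fmt"
	"os"
)

// connError mirrors the shape of transport.ConnectionError: it wraps an
// underlying cause and now exposes it via Unwrap.
type connError struct{ err error }

func (e connError) Error() string { return fmt.Sprintf("connection error: %v", e.err) }
func (e connError) Unwrap() error { return e.err }

func main() {
	err := connError{err: os.ErrDeadlineExceeded}
	// Without Unwrap this prints false; with it, errors.Is can walk the chain.
	fmt.Println(errors.Is(err, os.ErrDeadlineExceeded)) // true
}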
ErrConnClosing = connectionErrorf(true, nil, "transport is closing") diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 3604c7819f..8e0f6abe89 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -188,7 +188,9 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. key := strings.ToLower(k) - out[key] = v + s := make([]string, len(v)) + copy(s, v) + out[key] = s } return out, true } @@ -226,7 +228,9 @@ func FromOutgoingContext(ctx context.Context) (MD, bool) { // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. key := strings.ToLower(k) - out[key] = v + s := make([]string, len(v)) + copy(s, v) + out[key] = s } for _, added := range raw.added { if len(added)%2 == 1 { diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index e8367cb899..843633c910 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -131,7 +131,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } if _, ok := status.FromError(err); ok { // Status error: end the RPC unconditionally with this status. - return nil, nil, err + return nil, nil, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -175,3 +175,9 @@ func (pw *pickerWrapper) close() { pw.done = true close(pw.blockingCh) } + +// dropError is a wrapper error that indicates the LB policy wishes to drop the +// RPC and not retry it. +type dropError struct { + error +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index 5168b62b07..fb7a99e0a2 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -44,79 +44,107 @@ func (*pickfirstBuilder) Name() string { } type pickfirstBalancer struct { - state connectivity.State - cc balancer.ClientConn - sc balancer.SubConn + state connectivity.State + cc balancer.ClientConn + subConn balancer.SubConn } func (b *pickfirstBalancer) ResolverError(err error) { - switch b.state { - case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: - // Set a failing picker if we don't have a good picker. - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, - }) - } if logger.V(2) { logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) } + if b.subConn == nil { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) } -func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { - if len(cs.ResolverState.Addresses) == 0 { +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + if len(state.ResolverState.Addresses) == 0 { + // The resolver reported an empty address list. 
Treat it like an error by + // calling b.ResolverError. + if b.subConn != nil { + // Remove the old subConn. All addresses were removed, so it is no longer + // valid. + b.cc.RemoveSubConn(b.subConn) + b.subConn = nil + } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - if b.sc == nil { - var err error - b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) - if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) - } - b.state = connectivity.TransientFailure - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, - }) - return balancer.ErrBadResolverState + + if b.subConn != nil { + b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses) + return nil + } + + subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + if logger.V(2) { + logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) } - b.state = connectivity.Idle - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) - b.sc.Connect() - } else { - b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses) - b.sc.Connect() + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState } + b.subConn = subConn + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}}, + }) + b.subConn.Connect() return nil } -func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) + logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) } - if b.sc != sc { + if b.subConn != subConn { if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") } return } - b.state = s.ConnectivityState - if s.ConnectivityState == connectivity.Shutdown { - b.sc = nil + b.state = state.ConnectivityState + if state.ConnectivityState == connectivity.Shutdown { + b.subConn = nil return } - switch s.ConnectivityState { + switch state.ConnectivityState { case connectivity.Ready: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, + }) case connectivity.Connecting: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) case connectivity.Idle: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &idlePicker{sc: sc}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: 
state.ConnectivityState, + Picker: &idlePicker{subConn: subConn}, + }) case connectivity.TransientFailure: b.cc.UpdateState(balancer.State{ - ConnectivityState: s.ConnectivityState, - Picker: &picker{err: s.ConnectionError}, + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: state.ConnectionError}, }) } } @@ -125,8 +153,8 @@ func (b *pickfirstBalancer) Close() { } func (b *pickfirstBalancer) ExitIdle() { - if b.sc != nil && b.state == connectivity.Idle { - b.sc.Connect() + if b.subConn != nil && b.state == connectivity.Idle { + b.subConn.Connect() } } @@ -135,18 +163,18 @@ type picker struct { err error } -func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return p.result, p.err } // idlePicker is used when the SubConn is IDLE and kicks the SubConn into // CONNECTING when Pick is called. type idlePicker struct { - sc balancer.SubConn + subConn balancer.SubConn } -func (i *idlePicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - i.sc.Connect() +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.subConn.Connect() return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go index 7d05c14ebd..4e6a6b1a85 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: reflection/grpc_reflection_v1alpha/reflection.proto diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go index 82a5ba7f24..81344abd77 100644 --- a/vendor/google.golang.org/grpc/reflection/serverreflection.go +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -37,21 +37,17 @@ To register server reflection on a gRPC server: package reflection // import "google.golang.org/grpc/reflection" import ( - "bytes" - "compress/gzip" - "fmt" "io" - "io/ioutil" - "reflect" "sort" - "sync" - "github.com/golang/protobuf/proto" - dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" "google.golang.org/grpc" "google.golang.org/grpc/codes" rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" ) // GRPCServer is the interface provided by a gRPC server. It is implemented by @@ -59,339 +55,174 @@ import ( // as a registry, for accumulating the services exposed by the server. type GRPCServer interface { grpc.ServiceRegistrar - GetServiceInfo() map[string]grpc.ServiceInfo + ServiceInfoProvider } var _ GRPCServer = (*grpc.Server)(nil) -type serverReflectionServer struct { - rpb.UnimplementedServerReflectionServer - s GRPCServer - - initSymbols sync.Once - serviceNames []string - symbols map[string]*dpb.FileDescriptorProto // map of fully-qualified names to files -} - // Register registers the server reflection service on the given gRPC server. 
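Register keeps its one-line call site; it now simply wraps NewServer with the default resolvers. Typical wiring, unchanged for existing users (the listen address and error handling are illustrative):

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"
)

func main() {
	s := grpc.NewServer()
	reflection.Register(s) // still the common path; delegates to NewServer internally

	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatal(err)
	}
	if err := s.Serve(lis); err != nil {
		log.Fatal(err)
	}
}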
func Register(s GRPCServer) { - rpb.RegisterServerReflectionServer(s, &serverReflectionServer{ - s: s, - }) + svr := NewServer(ServerOptions{Services: s}) + rpb.RegisterServerReflectionServer(s, svr) } -// protoMessage is used for type assertion on proto messages. -// Generated proto message implements function Descriptor(), but Descriptor() -// is not part of interface proto.Message. This interface is needed to -// call Descriptor(). -type protoMessage interface { - Descriptor() ([]byte, []int) -} - -func (s *serverReflectionServer) getSymbols() (svcNames []string, symbolIndex map[string]*dpb.FileDescriptorProto) { - s.initSymbols.Do(func() { - serviceInfo := s.s.GetServiceInfo() - - s.symbols = map[string]*dpb.FileDescriptorProto{} - s.serviceNames = make([]string, 0, len(serviceInfo)) - processed := map[string]struct{}{} - for svc, info := range serviceInfo { - s.serviceNames = append(s.serviceNames, svc) - fdenc, ok := parseMetadata(info.Metadata) - if !ok { - continue - } - fd, err := decodeFileDesc(fdenc) - if err != nil { - continue - } - s.processFile(fd, processed) - } - sort.Strings(s.serviceNames) - }) - - return s.serviceNames, s.symbols -} - -func (s *serverReflectionServer) processFile(fd *dpb.FileDescriptorProto, processed map[string]struct{}) { - filename := fd.GetName() - if _, ok := processed[filename]; ok { - return - } - processed[filename] = struct{}{} - - prefix := fd.GetPackage() - - for _, msg := range fd.MessageType { - s.processMessage(fd, prefix, msg) - } - for _, en := range fd.EnumType { - s.processEnum(fd, prefix, en) - } - for _, ext := range fd.Extension { - s.processField(fd, prefix, ext) - } - for _, svc := range fd.Service { - svcName := fqn(prefix, svc.GetName()) - s.symbols[svcName] = fd - for _, meth := range svc.Method { - name := fqn(svcName, meth.GetName()) - s.symbols[name] = fd - } - } - - for _, dep := range fd.Dependency { - fdenc := proto.FileDescriptor(dep) - fdDep, err := decodeFileDesc(fdenc) - if err != nil { - continue - } - s.processFile(fdDep, processed) - } -} - -func (s *serverReflectionServer) processMessage(fd *dpb.FileDescriptorProto, prefix string, msg *dpb.DescriptorProto) { - msgName := fqn(prefix, msg.GetName()) - s.symbols[msgName] = fd - - for _, nested := range msg.NestedType { - s.processMessage(fd, msgName, nested) - } - for _, en := range msg.EnumType { - s.processEnum(fd, msgName, en) - } - for _, ext := range msg.Extension { - s.processField(fd, msgName, ext) - } - for _, fld := range msg.Field { - s.processField(fd, msgName, fld) - } - for _, oneof := range msg.OneofDecl { - oneofName := fqn(msgName, oneof.GetName()) - s.symbols[oneofName] = fd - } -} - -func (s *serverReflectionServer) processEnum(fd *dpb.FileDescriptorProto, prefix string, en *dpb.EnumDescriptorProto) { - enName := fqn(prefix, en.GetName()) - s.symbols[enName] = fd - - for _, val := range en.Value { - valName := fqn(enName, val.GetName()) - s.symbols[valName] = fd - } -} - -func (s *serverReflectionServer) processField(fd *dpb.FileDescriptorProto, prefix string, fld *dpb.FieldDescriptorProto) { - fldName := fqn(prefix, fld.GetName()) - s.symbols[fldName] = fd -} - -func fqn(prefix, name string) string { - if prefix == "" { - return name - } - return prefix + "." + name -} - -// fileDescForType gets the file descriptor for the given type. -// The given type should be a proto message. 
-func (s *serverReflectionServer) fileDescForType(st reflect.Type) (*dpb.FileDescriptorProto, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(protoMessage) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) - } - enc, _ := m.Descriptor() - - return decodeFileDesc(enc) -} - -// decodeFileDesc does decompression and unmarshalling on the given -// file descriptor byte slice. -func decodeFileDesc(enc []byte) (*dpb.FileDescriptorProto, error) { - raw, err := decompress(enc) - if err != nil { - return nil, fmt.Errorf("failed to decompress enc: %v", err) - } - - fd := new(dpb.FileDescriptorProto) - if err := proto.Unmarshal(raw, fd); err != nil { - return nil, fmt.Errorf("bad descriptor: %v", err) - } - return fd, nil +// ServiceInfoProvider is an interface used to retrieve metadata about the +// services to expose. +// +// The reflection service is only interested in the service names, but the +// signature is this way so that *grpc.Server implements it. So it is okay +// for a custom implementation to return zero values for the +// grpc.ServiceInfo values in the map. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServiceInfoProvider interface { + GetServiceInfo() map[string]grpc.ServiceInfo } -// decompress does gzip decompression. -func decompress(b []byte) ([]byte, error) { - r, err := gzip.NewReader(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("bad gzipped descriptor: %v", err) - } - out, err := ioutil.ReadAll(r) - if err != nil { - return nil, fmt.Errorf("bad gzipped descriptor: %v", err) - } - return out, nil +// ExtensionResolver is the interface used to query details about extensions. +// This interface is satisfied by protoregistry.GlobalTypes. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ExtensionResolver interface { + protoregistry.ExtensionTypeResolver + RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) } -func typeForName(name string) (reflect.Type, error) { - pt := proto.MessageType(name) - if pt == nil { - return nil, fmt.Errorf("unknown type: %q", name) - } - st := pt.Elem() - - return st, nil +// ServerOptions represents the options used to construct a reflection server. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServerOptions struct { + // The source of advertised RPC services. If not specified, the reflection + // server will report an empty list when asked to list services. + // + // This value will typically be a *grpc.Server. But the set of advertised + // services can be customized by wrapping a *grpc.Server or using an + // alternate implementation that returns a custom set of service names. + Services ServiceInfoProvider + // Optional resolver used to load descriptors. If not specified, + // protoregistry.GlobalFiles will be used. + DescriptorResolver protodesc.Resolver + // Optional resolver used to query for known extensions. If not specified, + // protoregistry.GlobalTypes will be used. 
+ ExtensionResolver ExtensionResolver } -func fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescriptorProto, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) +// NewServer returns a reflection server implementation using the given options. +// This can be used to customize behavior of the reflection service. Most usages +// should prefer to use Register instead. +// +// Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewServer(opts ServerOptions) rpb.ServerReflectionServer { + if opts.DescriptorResolver == nil { + opts.DescriptorResolver = protoregistry.GlobalFiles + } + if opts.ExtensionResolver == nil { + opts.ExtensionResolver = protoregistry.GlobalTypes + } + return &serverReflectionServer{ + s: opts.Services, + descResolver: opts.DescriptorResolver, + extResolver: opts.ExtensionResolver, } - - var extDesc *proto.ExtensionDesc - for id, desc := range proto.RegisteredExtensions(m) { - if id == ext { - extDesc = desc - break - } - } - - if extDesc == nil { - return nil, fmt.Errorf("failed to find registered extension for extension number %v", ext) - } - - return decodeFileDesc(proto.FileDescriptor(extDesc.Filename)) } -func (s *serverReflectionServer) allExtensionNumbersForType(st reflect.Type) ([]int32, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) - } - - exts := proto.RegisteredExtensions(m) - out := make([]int32, 0, len(exts)) - for id := range exts { - out = append(out, id) - } - return out, nil +type serverReflectionServer struct { + rpb.UnimplementedServerReflectionServer + s ServiceInfoProvider + descResolver protodesc.Resolver + extResolver ExtensionResolver } // fileDescWithDependencies returns a slice of serialized fileDescriptors in // wire format ([]byte). The fileDescriptors will include fd and all the // transitive dependencies of fd with names not in sentFileDescriptors. 
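ServiceInfoProvider plus ServerOptions makes the advertised service list pluggable. A sketch that hides one service from reflection by wrapping the server (filteredServices and the "internal.Admin" name are hypothetical, not part of the package):

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"
	rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)

// filteredServices wraps a *grpc.Server and drops selected names from
// the reflection listing. Zero-value ServiceInfo would also be fine,
// since the reflection service only reads the map keys.
type filteredServices struct {
	*grpc.Server
	hidden map[string]bool
}

func (f filteredServices) GetServiceInfo() map[string]grpc.ServiceInfo {
	out := make(map[string]grpc.ServiceInfo)
	for name, info := range f.Server.GetServiceInfo() {
		if !f.hidden[name] {
			out[name] = info
		}
	}
	return out
}

func main() {
	s := grpc.NewServer()
	svr := reflection.NewServer(reflection.ServerOptions{
		Services: filteredServices{Server: s, hidden: map[string]bool{"internal.Admin": true}},
	})
	rpb.RegisterServerReflectionServer(s, svr)
}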
-func fileDescWithDependencies(fd *dpb.FileDescriptorProto, sentFileDescriptors map[string]bool) ([][]byte, error) { - r := [][]byte{} - queue := []*dpb.FileDescriptorProto{fd} +func (s *serverReflectionServer) fileDescWithDependencies(fd protoreflect.FileDescriptor, sentFileDescriptors map[string]bool) ([][]byte, error) { + var r [][]byte + queue := []protoreflect.FileDescriptor{fd} for len(queue) > 0 { currentfd := queue[0] queue = queue[1:] - if sent := sentFileDescriptors[currentfd.GetName()]; len(r) == 0 || !sent { - sentFileDescriptors[currentfd.GetName()] = true - currentfdEncoded, err := proto.Marshal(currentfd) + if sent := sentFileDescriptors[currentfd.Path()]; len(r) == 0 || !sent { + sentFileDescriptors[currentfd.Path()] = true + fdProto := protodesc.ToFileDescriptorProto(currentfd) + currentfdEncoded, err := proto.Marshal(fdProto) if err != nil { return nil, err } r = append(r, currentfdEncoded) } - for _, dep := range currentfd.Dependency { - fdenc := proto.FileDescriptor(dep) - fdDep, err := decodeFileDesc(fdenc) - if err != nil { - continue - } - queue = append(queue, fdDep) + for i := 0; i < currentfd.Imports().Len(); i++ { + queue = append(queue, currentfd.Imports().Get(i)) } } return r, nil } -// fileDescEncodingByFilename finds the file descriptor for given filename, -// finds all of its previously unsent transitive dependencies, does marshalling -// on them, and returns the marshalled result. -func (s *serverReflectionServer) fileDescEncodingByFilename(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { - enc := proto.FileDescriptor(name) - if enc == nil { - return nil, fmt.Errorf("unknown file: %v", name) - } - fd, err := decodeFileDesc(enc) - if err != nil { - return nil, err - } - return fileDescWithDependencies(fd, sentFileDescriptors) -} - -// parseMetadata finds the file descriptor bytes specified meta. -// For SupportPackageIsVersion4, m is the name of the proto file, we -// call proto.FileDescriptor to get the byte slice. -// For SupportPackageIsVersion3, m is a byte slice itself. -func parseMetadata(meta interface{}) ([]byte, bool) { - // Check if meta is the file name. - if fileNameForMeta, ok := meta.(string); ok { - return proto.FileDescriptor(fileNameForMeta), true - } - - // Check if meta is the byte slice. - if enc, ok := meta.([]byte); ok { - return enc, true - } - - return nil, false -} - // fileDescEncodingContainingSymbol finds the file descriptor containing the // given symbol, finds all of its previously unsent transitive dependencies, // does marshalling on them, and returns the marshalled result. The given symbol // can be a type, a service or a method. func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { - _, symbols := s.getSymbols() - fd := symbols[name] - if fd == nil { - // Check if it's a type name that was not present in the - // transitive dependencies of the registered services. 
- if st, err := typeForName(name); err == nil { - fd, err = s.fileDescForType(st) - if err != nil { - return nil, err - } - } - } - - if fd == nil { - return nil, fmt.Errorf("unknown symbol: %v", name) + d, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)) + if err != nil { + return nil, err } - - return fileDescWithDependencies(fd, sentFileDescriptors) + return s.fileDescWithDependencies(d.ParentFile(), sentFileDescriptors) } // fileDescEncodingContainingExtension finds the file descriptor containing // given extension, finds all of its previously unsent transitive dependencies, // does marshalling on them, and returns the marshalled result. func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32, sentFileDescriptors map[string]bool) ([][]byte, error) { - st, err := typeForName(typeName) - if err != nil { - return nil, err - } - fd, err := fileDescContainingExtension(st, extNum) + xt, err := s.extResolver.FindExtensionByNumber(protoreflect.FullName(typeName), protoreflect.FieldNumber(extNum)) if err != nil { return nil, err } - return fileDescWithDependencies(fd, sentFileDescriptors) + return s.fileDescWithDependencies(xt.TypeDescriptor().ParentFile(), sentFileDescriptors) } // allExtensionNumbersForTypeName returns all extension numbers for the given type. func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) { - st, err := typeForName(name) - if err != nil { - return nil, err + var numbers []int32 + s.extResolver.RangeExtensionsByMessage(protoreflect.FullName(name), func(xt protoreflect.ExtensionType) bool { + numbers = append(numbers, int32(xt.TypeDescriptor().Number())) + return true + }) + sort.Slice(numbers, func(i, j int) bool { + return numbers[i] < numbers[j] + }) + if len(numbers) == 0 { + // maybe return an error if given type name is not known + if _, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)); err != nil { + return nil, err + } } - extNums, err := s.allExtensionNumbersForType(st) - if err != nil { - return nil, err + return numbers, nil +} + +// listServices returns the names of services this server exposes. +func (s *serverReflectionServer) listServices() []*rpb.ServiceResponse { + serviceInfo := s.s.GetServiceInfo() + resp := make([]*rpb.ServiceResponse, 0, len(serviceInfo)) + for svc := range serviceInfo { + resp = append(resp, &rpb.ServiceResponse{Name: svc}) } - return extNums, nil + sort.Slice(resp, func(i, j int) bool { + return resp[i].Name < resp[j].Name + }) + return resp } // ServerReflectionInfo is the reflection service handler. 
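For context on the rewrite above: the deleted helpers scraped the legacy proto.FileDescriptor byte registry via reflection, while the new code resolves names through the protobuf v2 registries. A minimal sketch of the same lookups against the global registries, assuming only that the queried proto file (the standard health service here, purely as an example) is linked into the binary:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
)

func main() {
	// FindDescriptorByName resolves any full name: message, service, or method.
	d, err := protoregistry.GlobalFiles.FindDescriptorByName("grpc.health.v1.Health")
	if err != nil {
		fmt.Println("symbol not registered:", err)
		return
	}
	fd := d.ParentFile()
	fmt.Println("containing file:", fd.Path())
	// Transitive dependencies come from Imports(), mirroring the queue
	// walk in fileDescWithDependencies.
	for i := 0; i < fd.Imports().Len(); i++ {
		fmt.Println("import:", fd.Imports().Get(i).Path())
	}
}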
@@ -412,7 +243,11 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflectio } switch req := in.MessageRequest.(type) { case *rpb.ServerReflectionRequest_FileByFilename: - b, err := s.fileDescEncodingByFilename(req.FileByFilename, sentFileDescriptors) + var b [][]byte + fd, err := s.descResolver.FindFileByPath(req.FileByFilename) + if err == nil { + b, err = s.fileDescWithDependencies(fd, sentFileDescriptors) + } if err != nil { out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ ErrorResponse: &rpb.ErrorResponse{ @@ -473,16 +308,9 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflectio } } case *rpb.ServerReflectionRequest_ListServices: - svcNames, _ := s.getSymbols() - serviceResponses := make([]*rpb.ServiceResponse, len(svcNames)) - for i, n := range svcNames { - serviceResponses[i] = &rpb.ServiceResponse{ - Name: n, - } - } out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{ ListServicesResponse: &rpb.ListServiceResponse{ - Service: serviceResponses, + Service: s.listServices(), }, } default: diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index a0a71aae96..99db79fafc 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -27,9 +27,9 @@ export PATH=${GOBIN}:${PATH} mkdir -p ${GOBIN} echo "remove existing generated files" -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testingv3/testv3.pb.go') +# grpc_testing_not_regenerate/*.pb.go is not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" (cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) @@ -68,7 +68,6 @@ SOURCES=( ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto - ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto ${WORKDIR}/grpc-proto/grpc/testing/*.proto ${WORKDIR}/grpc-proto/grpc/core/*.proto ) @@ -76,7 +75,20 @@ SOURCES=( # These options of the form 'Mfoo.proto=bar' instruct the codegen to use an # import path of 'bar' in the generated code when 'foo.proto' is imported in # one of the sources. -OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core +# +# Note that the protos listed here are all for testing purposes. All protos to +# be used externally should have a go_package option (and they don't need to be +# listed here). 
+OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ +Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing for src in ${SOURCES[@]}; do echo "protoc ${src}" @@ -85,7 +97,6 @@ for src in ${SOURCES[@]}; do -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ -I${WORKDIR}/protobuf/src \ - -I${WORKDIR}/istio \ ${src} done @@ -96,7 +107,6 @@ for src in ${LEGACY_SOURCES[@]}; do -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ -I${WORKDIR}/protobuf/src \ - -I${WORKDIR}/istio \ ${src} done @@ -105,12 +115,9 @@ done mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go - -# grpc/service_config/service_config.proto does not have a go_package option. -mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config +# grpc_testing_not_regenerate/*.pb.go are not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go # grpc/testing does not have a go_package option. mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index e87ecd0eeb..efcb7f3efd 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -28,25 +28,40 @@ type addressMapEntry struct { // Multiple accesses may not be performed concurrently. Must be created via // NewAddressMap; do not construct directly. type AddressMap struct { - m map[string]addressMapEntryList + // The underlying map is keyed by an Address with fields that we don't care + // about being set to their zero values. The only fields that we care about + // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to + // distinguish between addresses with same `Addr` and `ServerName`, but + // different `Attributes`, we cannot store the `Attributes` in the map key. + // + // The comparison operation for structs work as follows: + // Struct values are comparable if all their fields are comparable. Two + // struct values are equal if their corresponding non-blank fields are equal. + // + // The value type of the map contains a slice of addresses which match the key + // in their `Addr` and `ServerName` fields and contain the corresponding value + // associated with them. 
+ m map[Address]addressMapEntryList +} + +func toMapKey(addr *Address) Address { + return Address{Addr: addr.Addr, ServerName: addr.ServerName} } type addressMapEntryList []*addressMapEntry // NewAddressMap creates a new AddressMap. func NewAddressMap() *AddressMap { - return &AddressMap{m: make(map[string]addressMapEntryList)} + return &AddressMap{m: make(map[Address]addressMapEntryList)} } // find returns the index of addr in the addressMapEntry slice, or -1 if not // present. func (l addressMapEntryList) find(addr Address) int { - if len(l) == 0 { - return -1 - } for i, entry := range l { - if entry.addr.ServerName == addr.ServerName && - entry.addr.Attributes.Equal(addr.Attributes) { + // Attributes are the only thing to match on here, since `Addr` and + // `ServerName` are already equal. + if entry.addr.Attributes.Equal(addr.Attributes) { return i } } @@ -55,7 +70,8 @@ func (l addressMapEntryList) find(addr Address) int { // Get returns the value for the address in the map, if present. func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { - entryList := a.m[addr.Addr] + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { return entryList[entry].value, true } @@ -64,17 +80,19 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { // Set updates or adds the value to the address in the map. func (a *AddressMap) Set(addr Address, value interface{}) { - entryList := a.m[addr.Addr] + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { - a.m[addr.Addr][entry].value = value + entryList[entry].value = value return } - a.m[addr.Addr] = append(a.m[addr.Addr], &addressMapEntry{addr: addr, value: value}) + a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) } // Delete removes addr from the map. func (a *AddressMap) Delete(addr Address) { - entryList := a.m[addr.Addr] + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] entry := entryList.find(addr) if entry == -1 { return @@ -85,7 +103,7 @@ func (a *AddressMap) Delete(addr Address) { copy(entryList[entry:], entryList[entry+1:]) entryList = entryList[:len(entryList)-1] } - a.m[addr.Addr] = entryList + a.m[addrKey] = entryList } // Len returns the number of entries in the map. @@ -107,3 +125,14 @@ func (a *AddressMap) Keys() []Address { } return ret } + +// Values returns a slice of all current map values. +func (a *AddressMap) Values() []interface{} { + ret := make([]interface{}, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.value) + } + } + return ret +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index e28b680260..ca2e35a359 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/serviceconfig" ) @@ -139,13 +140,18 @@ type Address struct { // Equal returns whether a and o are identical. Metadata is compared directly, // not with any recursive introspection. 
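The AddressMap changes above lean on Go struct comparability: two struct values compare equal when all corresponding fields are equal, so a trimmed Address holding only Addr and ServerName works as a map key, while the non-comparable Attributes stay in the entry slice and are matched by a linear scan. A self-contained illustration of the pattern, using toy types rather than the gRPC ones:

package main

import "fmt"

// mapKey mirrors the comparable subset of a larger address value.
type mapKey struct {
	addr, serverName string
}

type entry struct {
	attrs string // stands in for the non-comparable Attributes
	value int
}

func main() {
	m := map[mapKey][]entry{}
	k := mapKey{addr: "10.0.0.1:80", serverName: "svc-a"}
	// Same Addr and ServerName but different attributes: one bucket,
	// disambiguated by scanning the slice, as in find() above.
	m[k] = append(m[k], entry{"zone=a", 1}, entry{"zone=b", 2})
	for _, e := range m[k] {
		fmt.Println(e.attrs, e.value)
	}
}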
-func (a *Address) Equal(o Address) bool { +func (a Address) Equal(o Address) bool { return a.Addr == o.Addr && a.ServerName == o.ServerName && a.Attributes.Equal(o.Attributes) && a.BalancerAttributes.Equal(o.BalancerAttributes) && a.Type == o.Type && a.Metadata == o.Metadata } +// String returns JSON formatted string representation of the address. +func (a Address) String() string { + return pretty.ToJSON(a) +} + // BuildOptions includes additional information for the builder to create // the resolver. type BuildOptions struct { diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 2c47cd54f0..05a9d4e0ba 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -19,7 +19,6 @@ package grpc import ( - "fmt" "strings" "sync" @@ -27,6 +26,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -97,10 +97,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { if ccr.done.HasFired() { return nil } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(s) - } + ccr.addChannelzTraceEvent(s) ccr.curState = s if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { return balancer.ErrBadResolverState @@ -125,10 +122,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { if ccr.done.HasFired() { return } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - } + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) ccr.curState.Addresses = addrs ccr.cc.updateResolverState(ccr.curState, nil) } @@ -141,7 +135,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { if ccr.done.HasFired() { return } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc) if ccr.cc.dopts.disableServiceConfig { channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") return @@ -151,9 +145,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) return } - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - } + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) ccr.curState.ServiceConfig = scpr ccr.cc.updateResolverState(ccr.curState, nil) } @@ -180,8 +172,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), - Severity: channelz.CtInfo, - }) + 
channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index eadf9e05fd..b54f5bb572 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -73,6 +73,12 @@ func init() { internal.DrainServerTransports = func(srv *Server, addr string) { srv.drainServerTransports(addr) } + internal.AddExtraServerOptions = func(opt ...ServerOption) { + extraServerOptions = opt + } + internal.ClearExtraServerOptions = func() { + extraServerOptions = nil + } } var statusOK = status.New(codes.OK, "") @@ -134,7 +140,7 @@ type Server struct { channelzRemoveOnce sync.Once serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData serverWorkerChannels []chan *serverWorkerData @@ -150,7 +156,7 @@ type serverOptions struct { chainUnaryInts []UnaryServerInterceptor chainStreamInts []StreamServerInterceptor inTapHandle tap.ServerInHandle - statsHandler stats.Handler + statsHandlers []stats.Handler maxConcurrentStreams uint32 maxReceiveMessageSize int maxSendMessageSize int @@ -174,6 +180,7 @@ var defaultServerOptions = serverOptions{ writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, } +var extraServerOptions []ServerOption // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption interface { @@ -435,7 +442,7 @@ func InTapHandle(h tap.ServerInHandle) ServerOption { // StatsHandler returns a ServerOption that sets the stats handler for the server. func StatsHandler(h stats.Handler) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.statsHandler = h + o.statsHandlers = append(o.statsHandlers, h) }) } @@ -560,6 +567,9 @@ func (s *Server) stopServerWorkers() { // started to accept requests yet. 
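In the server.go hunks above, the single statsHandler field becomes a statsHandlers slice and the StatsHandler option appends rather than overwrites, so several handlers can observe the same RPCs. A minimal sketch of installing two handlers side by side, assuming a deliberately no-op stats.Handler implementation (myHandler is hypothetical):

package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// myHandler is a no-op stats.Handler, for illustration only.
type myHandler struct{ name string }

func (h *myHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }
func (h *myHandler) HandleRPC(_ context.Context, _ stats.RPCStats)                   {}
func (h *myHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx
}
func (h *myHandler) HandleConn(_ context.Context, _ stats.ConnStats) {}

func main() {
	// Each StatsHandler option now appends, so both handlers receive
	// Begin/End and payload events for every RPC.
	srv := grpc.NewServer(
		grpc.StatsHandler(&myHandler{name: "metrics"}),
		grpc.StatsHandler(&myHandler{name: "tracing"}),
	)
	defer srv.Stop()
}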
func NewServer(opt ...ServerOption) *Server { opts := defaultServerOptions + for _, o := range extraServerOptions { + o.apply(&opts) + } for _, o := range opt { o.apply(&opts) } @@ -584,9 +594,8 @@ func NewServer(opt ...ServerOption) *Server { s.initServerWorkers() } - if channelz.IsOn() { - s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") - } + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + channelz.Info(logger, s.channelzID, "Server created") return s } @@ -712,7 +721,7 @@ var ErrServerStopped = errors.New("grpc: the server has been stopped") type listenSocket struct { net.Listener - channelzID int64 + channelzID *channelz.Identifier } func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { @@ -724,9 +733,8 @@ func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { func (l *listenSocket) Close() error { err := l.Listener.Close() - if channelz.IsOn() { - channelz.RemoveEntry(l.channelzID) - } + channelz.RemoveEntry(l.channelzID) + channelz.Info(logger, l.channelzID, "ListenSocket deleted") return err } @@ -759,11 +767,6 @@ func (s *Server) Serve(lis net.Listener) error { ls := &listenSocket{Listener: lis} s.lis[ls] = true - if channelz.IsOn() { - ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) - } - s.mu.Unlock() - defer func() { s.mu.Lock() if s.lis != nil && s.lis[ls] { @@ -773,8 +776,16 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Unlock() }() - var tempDelay time.Duration // how long to sleep on accept failure + var err error + ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + if err != nil { + s.mu.Unlock() + return err + } + s.mu.Unlock() + channelz.Info(logger, ls.channelzID, "ListenSocket created") + var tempDelay time.Duration // how long to sleep on accept failure for { rawConn, err := lis.Accept() if err != nil { @@ -866,7 +877,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { ConnectionTimeout: s.opts.connectionTimeout, Credentials: s.opts.creds, InTapHandle: s.opts.inTapHandle, - StatsHandler: s.opts.statsHandler, + StatsHandlers: s.opts.statsHandlers, KeepaliveParams: s.opts.keepaliveParams, KeepalivePolicy: s.opts.keepalivePolicy, InitialWindowSize: s.opts.initialWindowSize, @@ -962,7 +973,7 @@ var _ http.Handler = (*Server)(nil) // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -1075,8 +1086,10 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", len(payload), s.opts.maxSendMessageSize) } err = t.Write(stream, hdr, payload, opts) - if err == nil && s.opts.statsHandler != nil { - s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + if err == nil { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + } } return err } @@ -1123,13 +1136,13 @@ func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerIn } func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { - sh := s.opts.statsHandler - if sh != nil || trInfo != nil || channelz.IsOn() { + shs := s.opts.statsHandlers + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { s.incrCallsStarted() } var statsBegin *stats.Begin - if sh != nil { + for _, sh := range shs { beginTime := time.Now() statsBegin = &stats.Begin{ BeginTime: beginTime, @@ -1160,7 +1173,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. trInfo.tr.Finish() } - if sh != nil { + for _, sh := range shs { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1242,7 +1255,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } var payInfo *payloadInfo - if sh != nil || binlog != nil { + if len(shs) != 0 || binlog != nil { payInfo = &payloadInfo{} } d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) @@ -1259,7 +1272,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } - if sh != nil { + for _, sh := range shs { sh.HandleRPC(stream.Context(), &stats.InPayload{ RecvTime: time.Now(), Payload: v, @@ -1283,9 +1296,10 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - // Convert appErr if it is not a grpc status error. - appErr = status.Error(codes.Unknown, appErr.Error()) - appStatus, _ = status.FromError(appErr) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. 
+ appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() } if trInfo != nil { trInfo.tr.LazyLog(stringer(appStatus.Message()), true) @@ -1416,16 +1430,18 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if channelz.IsOn() { s.incrCallsStarted() } - sh := s.opts.statsHandler + shs := s.opts.statsHandlers var statsBegin *stats.Begin - if sh != nil { + if len(shs) != 0 { beginTime := time.Now() statsBegin = &stats.Begin{ BeginTime: beginTime, IsClientStream: sd.ClientStreams, IsServerStream: sd.ServerStreams, } - sh.HandleRPC(stream.Context(), statsBegin) + for _, sh := range shs { + sh.HandleRPC(stream.Context(), statsBegin) + } } ctx := NewContextWithServerTransportStream(stream.Context(), stream) ss := &serverStream{ @@ -1437,10 +1453,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, - statsHandler: sh, + statsHandler: shs, } - if sh != nil || trInfo != nil || channelz.IsOn() { + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { // See comment in processUnaryRPC on defers. defer func() { if trInfo != nil { @@ -1454,7 +1470,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } - if sh != nil { + if len(shs) != 0 { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1462,7 +1478,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + for _, sh := range shs { + sh.HandleRPC(stream.Context(), end) + } } if channelz.IsOn() { @@ -1549,7 +1567,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - appStatus = status.New(codes.Unknown, appErr.Error()) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) appErr = appStatus.Err() } if trInfo != nil { @@ -1706,11 +1726,7 @@ func (s *Server) Stop() { s.done.Fire() }() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() listeners := s.lis @@ -1748,11 +1764,7 @@ func (s *Server) GracefulStop() { s.quit.Fire() defer s.done.Fire() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() if s.conns == nil { s.mu.Unlock() @@ -1805,12 +1817,26 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return codec } -// SetHeader sets the header metadata. -// When called multiple times, all the provided metadata will be merged. -// All the metadata will be sent out when one of the following happens: -// - grpc.SendHeader() is called; -// - The first response is sent out; -// - An RPC status is sent out (error or success). +// SetHeader sets the header metadata to be sent from the server to the client. +// The context provided must be the context passed to the server's handler. +// +// Streaming RPCs should prefer the SetHeader method of the ServerStream. +// +// When called multiple times, all the provided metadata will be merged. 
All +// the metadata will be sent out when one of the following happens: +// +// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. +// - The first response message is sent. For unary handlers, this occurs when +// the handler returns; for streaming handlers, this can happen when stream's +// SendMsg method is called. +// - An RPC status is sent out (error or success). This occurs when the handler +// returns. +// +// SetHeader will fail if called after any of the events above. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetHeader(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil @@ -1822,8 +1848,14 @@ func SetHeader(ctx context.Context, md metadata.MD) error { return stream.SetHeader(md) } -// SendHeader sends header metadata. It may be called at most once. -// The provided md and headers set by SetHeader() will be sent. +// SendHeader sends header metadata. It may be called at most once, and may not +// be called after any event that causes headers to be sent (see SetHeader for +// a complete list). The provided md and headers set by SetHeader() will be +// sent. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SendHeader(ctx context.Context, md metadata.MD) error { stream := ServerTransportStreamFromContext(ctx) if stream == nil { @@ -1837,6 +1869,10 @@ func SendHeader(ctx context.Context, md metadata.MD) error { // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // When called more than once, all the provided metadata will be merged. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetTrailer(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 22c4240cf7..b01c548bb9 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -218,7 +218,7 @@ type jsonSC struct { } func init() { - internal.ParseServiceConfigForTesting = parseServiceConfig + internal.ParseServiceConfig = parseServiceConfig } func parseServiceConfig(js string) *serviceconfig.ParseResult { if len(js) == 0 { @@ -381,6 +381,9 @@ func init() { // // If any of them is NOT *ServiceConfig, return false. 
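The nil guard added to equalServiceConfig just below matters because serviceconfig.Config is an interface: without it, two nil configs fall through to the type assertion and compare unequal. A stripped-down illustration of the pitfall, with toy types rather than the gRPC ones:

package main

import "fmt"

type config interface{ isConfig() }

type serviceConfig struct{}

func (*serviceConfig) isConfig() {}

func equal(a, b config) bool {
	if a == nil && b == nil {
		return true // both unset: equal by definition
	}
	sa, ok := a.(*serviceConfig)
	if !ok {
		return false // also covers a == nil while b != nil
	}
	sb, ok := b.(*serviceConfig)
	if !ok {
		return false
	}
	return sa == sb
}

func main() {
	fmt.Println(equal(nil, nil)) // true with the guard, false without it
	sc := &serviceConfig{}
	fmt.Println(equal(sc, sc), equal(sc, nil)) // true false
}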
func equalServiceConfig(a, b serviceconfig.Config) bool { + if a == nil && b == nil { + return true + } aa, ok := a.(*ServiceConfig) if !ok { return false diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 625d47b34e..6d82e0d7cc 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -36,6 +36,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/transport" @@ -46,10 +47,12 @@ import ( ) // StreamHandler defines the handler called by gRPC server to complete the -// execution of a streaming RPC. If a StreamHandler returns an error, it -// should be produced by the status package, or else gRPC will use -// codes.Unknown as the status code and err.Error() as the status message -// of the RPC. +// execution of a streaming RPC. +// +// If a StreamHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type StreamHandler func(srv interface{}, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. Used @@ -164,6 +167,11 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if err := imetadata.Validate(md); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } if channelz.IsOn() { cc.incrCallsStarted() defer func() { @@ -295,14 +303,28 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client } cs.binlog = binarylog.GetMethodLogger(method) - if err := cs.newAttemptLocked(false /* isTransparent */); err != nil { + cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */) + if err != nil { cs.finish(err) return nil, err } - op := func(a *csAttempt) error { return a.newStream() } + // Pick the transport to use and create a new stream on the transport. + // Assign cs.attempt upon success. + op := func(a *csAttempt) error { + if err := a.getTransport(); err != nil { + return err + } + if err := a.newStream(); err != nil { + return err + } + // Because this operation is always called either here (while creating + // the clientStream) or by the retry code while locked when replaying + // the operation, it is safe to access cs.attempt directly. + cs.attempt = a + return nil + } if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { - cs.finish(err) return nil, err } @@ -341,14 +363,20 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client return cs, nil } -// newAttemptLocked creates a new attempt with a transport. -// If it succeeds, then it replaces clientStream's attempt with this new attempt. -func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { +// newAttemptLocked creates a new csAttempt without a transport or stream. 
+func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) { + if err := cs.ctx.Err(); err != nil { + return nil, toRPCErr(err) + } + if err := cs.cc.ctx.Err(); err != nil { + return nil, ErrClientConnClosing + } + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) method := cs.callHdr.Method - sh := cs.cc.dopts.copts.StatsHandler var beginTime time.Time - if sh != nil { + shs := cs.cc.dopts.copts.StatsHandlers + for _, sh := range shs { ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) beginTime = time.Now() begin := &stats.Begin{ @@ -377,27 +405,6 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { ctx = trace.NewContext(ctx, trInfo.tr) } - newAttempt := &csAttempt{ - ctx: ctx, - beginTime: beginTime, - cs: cs, - dc: cs.cc.dopts.dc, - statsHandler: sh, - trInfo: trInfo, - } - defer func() { - if retErr != nil { - // This attempt is not set in the clientStream, so it's finish won't - // be called. Call it here for stats and trace in case they are not - // nil. - newAttempt.finish(retErr) - } - }() - - if err := ctx.Err(); err != nil { - return toRPCErr(err) - } - if cs.cc.parsedTarget.Scheme == "xds" { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. @@ -405,16 +412,32 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), )) } - t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method) + + return &csAttempt{ + ctx: ctx, + beginTime: beginTime, + cs: cs, + dc: cs.cc.dopts.dc, + statsHandlers: shs, + trInfo: trInfo, + }, nil +} + +func (a *csAttempt) getTransport() error { + cs := a.cs + + var err error + a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { + if de, ok := err.(dropError); ok { + err = de.error + a.drop = true + } return err } - if trInfo != nil { - trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) + if a.trInfo != nil { + a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr()) } - newAttempt.t = t - newAttempt.done = done - cs.attempt = newAttempt return nil } @@ -423,12 +446,21 @@ func (a *csAttempt) newStream() error { cs.callHdr.PreviousAttempts = cs.numRetries s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { - // Return without converting to an RPC error so retry code can - // inspect. - return err + nse, ok := err.(*transport.NewStreamError) + if !ok { + // Unexpected. + return err + } + + if nse.AllowTransparentRetry { + a.allowTransparentRetry = true + } + + // Unwrap and convert error. + return toRPCErr(nse.Err) } - cs.attempt.s = s - cs.attempt.p = &parser{r: s} + a.s = s + a.p = &parser{r: s} return nil } @@ -454,7 +486,7 @@ type clientStream struct { retryThrottler *retryThrottler // The throttler active when the RPC began. - binlog *binarylog.MethodLogger // Binary logger, can be nil. + binlog binarylog.MethodLogger // Binary logger, can be nil. // serverHeaderBinlogged is a boolean for whether server header has been // logged. Server header will be logged when the first time one of those // happens: stream.Header(), stream.Recv(). @@ -504,8 +536,13 @@ type csAttempt struct { // and cleared when the finish method is called. 
trInfo *traceInfo - statsHandler stats.Handler - beginTime time.Time + statsHandlers []stats.Handler + beginTime time.Time + + // set for newStream errors that may be transparently retried + allowTransparentRetry bool + // set for pick errors that are returned as a status + drop bool } func (cs *clientStream) commitAttemptLocked() { @@ -525,41 +562,21 @@ func (cs *clientStream) commitAttempt() { // shouldRetry returns nil if the RPC should be retried; otherwise it returns // the error that should be returned by the operation. If the RPC should be // retried, the bool indicates whether it is being retried transparently. -func (cs *clientStream) shouldRetry(err error) (bool, error) { - if cs.attempt.s == nil { - // Error from NewClientStream. - nse, ok := err.(*transport.NewStreamError) - if !ok { - // Unexpected, but assume no I/O was performed and the RPC is not - // fatal, so retry indefinitely. - return true, nil - } - - // Unwrap and convert error. - err = toRPCErr(nse.Err) - - // Never retry DoNotRetry errors, which indicate the RPC should not be - // retried due to max header list size violation, etc. - if nse.DoNotRetry { - return false, err - } +func (a *csAttempt) shouldRetry(err error) (bool, error) { + cs := a.cs - // In the event of a non-IO operation error from NewStream, we never - // attempted to write anything to the wire, so we can retry - // indefinitely. - if !nse.DoNotTransparentRetry { - return true, nil - } - } - if cs.finished || cs.committed { - // RPC is finished or committed; cannot retry. + if cs.finished || cs.committed || a.drop { + // RPC is finished or committed or was dropped by the picker; cannot retry. return false, err } + if a.s == nil && a.allowTransparentRetry { + return true, nil + } // Wait for the trailers. unprocessed := false - if cs.attempt.s != nil { - <-cs.attempt.s.Done() - unprocessed = cs.attempt.s.Unprocessed() + if a.s != nil { + <-a.s.Done() + unprocessed = a.s.Unprocessed() } if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. @@ -571,14 +588,14 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { pushback := 0 hasPushback := false - if cs.attempt.s != nil { - if !cs.attempt.s.TrailersOnly() { + if a.s != nil { + if !a.s.TrailersOnly() { return false, err } // TODO(retry): Move down if the spec changes to not check server pushback // before considering this a failure for throttling. - sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + sps := a.s.Trailer()["grpc-retry-pushback-ms"] if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { @@ -595,10 +612,10 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { } var code codes.Code - if cs.attempt.s != nil { - code = cs.attempt.s.Status().Code() + if a.s != nil { + code = a.s.Status().Code() } else { - code = status.Convert(err).Code() + code = status.Code(err) } rp := cs.methodConfig.RetryPolicy @@ -643,19 +660,24 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { } // Returns nil if a retry was performed and succeeded; error otherwise. 
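retryLocked below now receives the attempt explicitly, and shouldRetry has moved onto csAttempt so each attempt carries its own drop and transparent-retry state. The policy that drives non-transparent retries still comes from the method config; for reference, a sketch of enabling it client-side through a default service config (the target and service names are placeholders):

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("dns:///example.local:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Retry UNAVAILABLE up to 4 attempts with exponential backoff.
		grpc.WithDefaultServiceConfig(`{
		  "methodConfig": [{
		    "name": [{"service": "echo.Echo"}],
		    "retryPolicy": {
		      "maxAttempts": 4,
		      "initialBackoff": "0.1s",
		      "maxBackoff": "1s",
		      "backoffMultiplier": 2.0,
		      "retryableStatusCodes": ["UNAVAILABLE"]
		    }
		  }]
		}`))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}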
-func (cs *clientStream) retryLocked(lastErr error) error { +func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { for { - cs.attempt.finish(toRPCErr(lastErr)) - isTransparent, err := cs.shouldRetry(lastErr) + attempt.finish(toRPCErr(lastErr)) + isTransparent, err := attempt.shouldRetry(lastErr) if err != nil { cs.commitAttemptLocked() return err } cs.firstAttempt = false - if err := cs.newAttemptLocked(isTransparent); err != nil { + attempt, err = cs.newAttemptLocked(isTransparent) + if err != nil { + // Only returns error if the clientconn is closed or the context of + // the stream is canceled. return err } - if lastErr = cs.replayBufferLocked(); lastErr == nil { + // Note that the first op in the replay buffer always sets cs.attempt + // if it is able to pick a transport and create a stream. + if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil } } @@ -665,7 +687,10 @@ func (cs *clientStream) Context() context.Context { cs.commitAttempt() // No need to lock before using attempt, since we know it is committed and // cannot change. - return cs.attempt.s.Context() + if cs.attempt.s != nil { + return cs.attempt.s.Context() + } + return cs.ctx } func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { @@ -695,7 +720,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) cs.mu.Unlock() return err } - if err := cs.retryLocked(err); err != nil { + if err := cs.retryLocked(a, err); err != nil { cs.mu.Unlock() return err } @@ -726,7 +751,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { cs.binlog.Log(logEntry) cs.serverHeaderBinlogged = true } - return m, err + return m, nil } func (cs *clientStream) Trailer() metadata.MD { @@ -744,10 +769,9 @@ func (cs *clientStream) Trailer() metadata.MD { return cs.attempt.s.Trailer() } -func (cs *clientStream) replayBufferLocked() error { - a := cs.attempt +func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { for _, f := range cs.buffer { - if err := f(a); err != nil { + if err := f(attempt); err != nil { return err } } @@ -795,22 +819,17 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { if len(payload) > *cs.callInfo.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) } - msgBytes := data // Store the pointer before setting to nil. For binary logging. op := func(a *csAttempt) error { - err := a.sendMsg(m, hdr, payload, data) - // nil out the message and uncomp when replaying; they are only needed for - // stats which is disabled for subsequent attempts. 
- m, data = nil, nil - return err + return a.sendMsg(m, hdr, payload, data) } err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) if cs.binlog != nil && err == nil { cs.binlog.Log(&binarylog.ClientMessage{ OnClientSide: true, - Message: msgBytes, + Message: data, }) } - return + return err } func (cs *clientStream) RecvMsg(m interface{}) error { @@ -941,8 +960,8 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { } return io.EOF } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) } if channelz.IsOn() { a.t.IncrMsgSent() @@ -952,7 +971,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { cs := a.cs - if a.statsHandler != nil && payInfo == nil { + if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} } @@ -989,8 +1008,8 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } a.mu.Unlock() } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{ + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, &stats.InPayload{ Client: true, RecvTime: time.Now(), Payload: m, @@ -1049,7 +1068,7 @@ func (a *csAttempt) finish(err error) { ServerLoad: balancerload.Parse(tr), }) } - if a.statsHandler != nil { + for _, sh := range a.statsHandlers { end := &stats.End{ Client: true, BeginTime: a.beginTime, @@ -1057,7 +1076,7 @@ func (a *csAttempt) finish(err error) { Trailer: tr, Error: err, } - a.statsHandler.HandleRPC(a.ctx, end) + sh.HandleRPC(a.ctx, end) } if a.trInfo != nil && a.trInfo.tr != nil { if err == nil { @@ -1362,8 +1381,10 @@ func (as *addrConnStream) finish(err error) { // ServerStream defines the server-side behavior of a streaming RPC. // -// All errors returned from ServerStream methods are compatible with the -// status package. +// Errors returned from ServerStream methods are compatible with the status +// package. However, the status code will often not match the RPC status as +// seen by the client application, and therefore, should not be relied upon for +// this purpose. type ServerStream interface { // SetHeader sets the header metadata. It may be called multiple times. // When call multiple times, all the provided metadata will be merged. @@ -1424,9 +1445,9 @@ type serverStream struct { maxSendMessageSize int trInfo *traceInfo - statsHandler stats.Handler + statsHandler []stats.Handler - binlog *binarylog.MethodLogger + binlog binarylog.MethodLogger // serverHeaderBinlogged indicates whether server header has been logged. It // will happen when one of the following two happens: stream.SendHeader(), // stream.Send(). 
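The serverStream hunks that follow (SetHeader, SendHeader, SetTrailer) start validating metadata with imetadata.Validate before anything is written. Roughly, the rules it enforces are the gRPC wire rules: keys must be lowercase, and non-printable values belong only under "-bin" keys; this is a paraphrase, not the exact internal validator. A small sketch of constructing metadata under those rules:

package main

import (
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// Fine: lowercase key, printable ASCII value. Pairs also lowercases
	// keys for you.
	ok := metadata.Pairs("x-trace-id", "abc123")
	// Binary data must travel under a "-bin" key; gRPC base64-encodes it
	// on the wire.
	bin := metadata.Pairs("payload-bin", string([]byte{0x00, 0xff}))
	// Likely rejected by server-side validation: a non-printable,
	// non-ASCII value under a non "-bin" key.
	bad := metadata.MD{"x-note": []string{"caf\u00e9"}}
	fmt.Println(ok, bin, bad)
}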
@@ -1446,11 +1467,20 @@ func (ss *serverStream) SetHeader(md metadata.MD) error { if md.Len() == 0 { return nil } + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } return ss.s.SetHeader(md) } func (ss *serverStream) SendHeader(md metadata.MD) error { - err := ss.t.WriteHeader(ss.s, md) + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + err = ss.t.WriteHeader(ss.s, md) if ss.binlog != nil && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() ss.binlog.Log(&binarylog.ServerHeader{ @@ -1465,6 +1495,9 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { if md.Len() == 0 { return } + if err := imetadata.Validate(md); err != nil { + logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) + } ss.s.SetTrailer(md) } @@ -1522,8 +1555,10 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { Message: data, }) } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + } } return nil } @@ -1557,7 +1592,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } }() var payInfo *payloadInfo - if ss.statsHandler != nil || ss.binlog != nil { + if len(ss.statsHandler) != 0 || ss.binlog != nil { payInfo = &payloadInfo{} } if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { @@ -1572,15 +1607,17 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } return toRPCErr(err) } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength + headerLen, - Length: len(payInfo.uncompressedBytes), - }) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength + headerLen, + Length: len(payInfo.uncompressedBytes), + }) + } } if ss.binlog != nil { ss.binlog.Log(&binarylog.ClientMessage{ diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 8ef0958797..0eb2998cbe 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.43.0" +const Version = "1.48.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index d923187a7b..ceb436c6ce 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -107,7 +107,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do go vet -all ./... | fail_on_output gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "\.pb\.go" - golint ./... 2>&1 | not grep -vE "/testv3\.pb\.go:" + golint ./... 
2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" go mod tidy git status --porcelain 2>&1 | fail_on_output || \ diff --git a/vendor/modules.txt b/vendor/modules.txt index 626e6be897..7ed68bddcd 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -700,6 +700,7 @@ github.com/portworx/kdmp/pkg/client/clientset/versioned/scheme github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1 github.com/portworx/kdmp/pkg/controllers github.com/portworx/kdmp/pkg/controllers/dataexport +github.com/portworx/kdmp/pkg/controllers/resourceexport github.com/portworx/kdmp/pkg/drivers github.com/portworx/kdmp/pkg/drivers/driversinstance github.com/portworx/kdmp/pkg/drivers/kopiabackup @@ -735,7 +736,7 @@ github.com/portworx/px-object-controller/client/listers/objectservice/v1alpha1 github.com/portworx/px-object-controller/pkg/client github.com/portworx/px-object-controller/pkg/controller github.com/portworx/px-object-controller/pkg/utils -# github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 => github.com/portworx/sched-ops v1.20.4-rc1.0.20220922150029-c1d35df2436a +# github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 => github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 ## explicit github.com/portworx/sched-ops/k8s/admissionregistration github.com/portworx/sched-ops/k8s/apiextensions @@ -1056,7 +1057,7 @@ google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.43.0 +# google.golang.org/grpc v1.48.0 ## explicit google.golang.org/grpc google.golang.org/grpc/attributes @@ -1068,6 +1069,7 @@ google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/channelz google.golang.org/grpc/codes google.golang.org/grpc/connectivity google.golang.org/grpc/credentials @@ -1086,6 +1088,7 @@ google.golang.org/grpc/encoding/proto google.golang.org/grpc/grpclog google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff +google.golang.org/grpc/internal/balancer/gracefulswitch google.golang.org/grpc/internal/balancerload google.golang.org/grpc/internal/binarylog google.golang.org/grpc/internal/buffer @@ -1098,6 +1101,7 @@ google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil google.golang.org/grpc/internal/metadata +google.golang.org/grpc/internal/pretty google.golang.org/grpc/internal/resolver google.golang.org/grpc/internal/resolver/dns google.golang.org/grpc/internal/resolver/passthrough @@ -1911,7 +1915,7 @@ sigs.k8s.io/yaml # github.com/kubernetes-incubator/external-storage v0.20.4-openstorage-rc10 => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc10 # github.com/libopenstorage/autopilot-api => github.com/libopenstorage/autopilot-api v0.6.1-0.20210301232050-ca2633c6e114 # github.com/libopenstorage/openstorage => github.com/libopenstorage/openstorage v1.0.1-0.20220707215604-afbea03c04c5 -# github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20220922150029-c1d35df2436a +# github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 # github.com/portworx/torpedo => github.com/portworx/torpedo v0.0.0-20220714042817-25f6ab6dc5d1 # gopkg.in/fsnotify.v1 
v1.4.7 => github.com/fsnotify/fsnotify v1.4.7 # helm.sh/helm/v3 => helm.sh/helm/v3 v3.6.1 From f313b1bef441894f0e2d19e2bb381eed1cd8b706 Mon Sep 17 00:00:00 2001 From: Lalatendu Das Date: Sun, 6 Nov 2022 16:24:19 +0000 Subject: [PATCH 74/97] pb-3213: Send the triggeredFrom values to NFS-specific CRs The image registry and namespace details are passed via the CR. This is passed to the job to determine the executor image location. Signed-off-by: Lalatendu Das --- pkg/applicationmanager/controllers/applicationbackup.go | 9 +++++++++ pkg/applicationmanager/controllers/applicationrestore.go | 8 ++++++++ 2 files changed, 17 insertions(+) diff --git a/pkg/applicationmanager/controllers/applicationbackup.go b/pkg/applicationmanager/controllers/applicationbackup.go index 1af8769cbb..48b2a5529e 100644 --- a/pkg/applicationmanager/controllers/applicationbackup.go +++ b/pkg/applicationmanager/controllers/applicationbackup.go @@ -28,6 +28,7 @@ import ( "github.com/libopenstorage/stork/pkg/utils" "github.com/libopenstorage/stork/pkg/version" kdmpapi "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1" + kdmputils "github.com/portworx/kdmp/pkg/drivers/utils" "github.com/portworx/sched-ops/k8s/apiextensions" "github.com/portworx/sched-ops/k8s/core" kdmpShedOps "github.com/portworx/sched-ops/k8s/kdmp" @@ -1384,8 +1385,16 @@ func (a *ApplicationBackupController) backupResources( Namespace: backupLocation.Namespace, Name: backupLocation.Name, } + resourceExport.Spec.TriggeredFrom = kdmputils.TriggeredFromStork + storkPodNs, err := k8sutils.GetStorkPodNamespace() + if err != nil { + logrus.Errorf("error in getting stork pod namespace: %v", err) + return err + } + resourceExport.Spec.TriggeredFromNs = storkPodNs resourceExport.Spec.Source = *source resourceExport.Spec.Destination = *destination + _, err = kdmpShedOps.Instance().CreateResourceExport(resourceExport) if err != nil { logrus.Errorf("failed to create DataExport CR: %v", err) diff --git a/pkg/applicationmanager/controllers/applicationrestore.go b/pkg/applicationmanager/controllers/applicationrestore.go index ca73f400f3..831948ef80 100644 --- a/pkg/applicationmanager/controllers/applicationrestore.go +++ b/pkg/applicationmanager/controllers/applicationrestore.go @@ -21,6 +21,7 @@ import ( "github.com/libopenstorage/stork/pkg/utils" "github.com/libopenstorage/stork/pkg/version" kdmpapi "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1" + kdmputils "github.com/portworx/kdmp/pkg/drivers/utils" "github.com/portworx/sched-ops/k8s/apiextensions" "github.com/portworx/sched-ops/k8s/core" kdmpShedOps "github.com/portworx/sched-ops/k8s/kdmp" @@ -691,6 +692,13 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat Namespace: backupLocation.Namespace, Name: backupLocation.Name, } + resourceExport.Spec.TriggeredFrom = kdmputils.TriggeredFromStork + storkPodNs, err := k8sutils.GetStorkPodNamespace() + if err != nil { + logrus.Errorf("error in getting stork pod namespace: %v", err) + return err + } + resourceExport.Spec.TriggeredFromNs = storkPodNs resourceExport.Spec.Source = *source resourceExport.Spec.Destination = *destination _, err = kdmpShedOps.Instance().CreateResourceExport(resourceExport) From 407d0d06c5244e871098929795c78ed6dba23bfe Mon Sep 17 00:00:00 2001 From: Lalatendu Das Date: Mon, 7 Nov 2022 12:41:57 +0530 Subject: [PATCH 75/97] Delete how ed4bac3825565902733b151138c32367231e48b3 Review comment fix - this unwanted file was created accidentally during a git operation and needs to be deleted. 
--- how ed4bac3825565902733b151138c32367231e48b3 | 11072 ----------------- 1 file changed, 11072 deletions(-) delete mode 100644 how ed4bac3825565902733b151138c32367231e48b3 diff --git a/how ed4bac3825565902733b151138c32367231e48b3 b/how ed4bac3825565902733b151138c32367231e48b3 deleted file mode 100644 index e824a16359..0000000000 --- a/how ed4bac3825565902733b151138c32367231e48b3 +++ /dev/null @@ -1,11072 +0,0 @@ -commit ed4bac3825565902733b151138c32367231e48b3 (HEAD -> pb-2939, origin/pb-2939) -Author: Lalatendu Das -Date: Wed Jul 27 06:48:28 2022 +0000 - - pb-2939: enable NFS type backuplocation - - - added NFS type to stork v1alpha1 APIs - - added objectlock specific API for for NFS - - skipped bucket exist check in Application backup - controller specifically for NFS scenario - - Signed-off-by: Lalatendu Das - -commit 207a503c45ae3f373f558b32a27b2a20e2199930 (origin/nfs-ea, origin/master, origin/HEAD, nfs-ea, master) -Author: sivakumar subraani -Date: Wed Aug 31 14:36:10 2022 +0000 - - pb-3005: Added fix to include the CRDs even if CR is are present. - - - With fix, we will include all the CRDs of a group, if one CRDs - of a parrticular group had a CR in the given namespace. - -commit fae173e9c7cecf9de0db8f1032a894b6a2b88084 -Author: Aditya Dani -Date: Tue Aug 30 09:36:57 2022 -0700 - - Log when stork takes a forceful snapshot - -commit 72bd1fe2809fa757332807c204eb7aee5b02e1cd -Author: Ram -Date: Mon Aug 29 18:21:28 2022 +0530 - - Force full backup on specified day in daily schedule - - Signed-off-by: Ram - -commit 25b400055100500a8b4bcf8fc2e21c533ee1487f -Author: Ram -Date: Sat Aug 27 14:57:33 2022 +0530 - - vendor update - sched-ops master - - Signed-off-by: Ram - -commit 0b54138f41d94eef252454d7f9ebd052a5ab4f59 -Author: Ram -Date: Sat Aug 27 14:57:05 2022 +0530 - - pwx-26151: skip collecting endpoints for headless service - - Signed-off-by: Ram - -commit 9782bc0f44250054346ac0d5089ce21da68d7fc8 -Author: Rohit-PX -Date: Mon Aug 15 22:26:01 2022 +0000 - - vendor updates - - Signed-off-by: Rohit-PX - -commit d630f1478d0ed353bb97b72990ba8f7e44d31b0b -Author: Ram -Date: Fri Aug 26 12:46:06 2022 +0530 - - Ensure to re-run transformation validation before each migration run - - Signed-off-by: Ram - -commit d3d4c9dd3d51f0e0b24f30f66739aa980f1eb49e -Author: Ram -Date: Wed Aug 24 20:27:59 2022 +0530 - - vendor update sched-ops - - Signed-off-by: Ram - -commit 5c6a98c625fba875466959c0c9f2b5899cf8fde0 -Author: Ram -Date: Wed Aug 24 20:27:04 2022 +0530 - - Dry-run resource transformation validation during migration prechecks - - - validate transform CR - - dry-run on newly detected object before starting migration - - Signed-off-by: Ram - -commit 338ee5d16d70740c1a65d6c667dc01789f2ed9a7 -Author: Ram -Date: Wed Aug 24 19:32:48 2022 +0530 - - Allow enable/disable resource transformation controller - - - addressed review comments - - Signed-off-by: Ram - -commit 0e2f00364d25c4d50908018cffa151782577c62b -Author: Ram -Date: Thu Aug 18 19:57:28 2022 +0530 - - vendor updates stork sched ops - - Signed-off-by: Ram - -commit 4307eea88180b29a575ebaa981e65e17e65f76ff -Author: Ram -Date: Thu Aug 18 19:56:24 2022 +0530 - - Add transformation rule handler in resourcecollector - - - allow dry run for keypair and slice value type - - Signed-off-by: Ram - -commit 71a23be2d835d72dd7085cd7cd8e9ab388e13955 -Author: Ram -Date: Thu Aug 18 19:54:48 2022 +0530 - - pwx-24979: integrate transform resource api with migration path - - - accept resource transformation in migration spec - - update resource as per 
transformation rule - - Signed-off-by: Ram - -commit 8e7b43a5ed1451e9bbf52ed7ded0c08c6066ae7c -Author: Ram -Date: Mon Aug 1 22:11:18 2022 +0530 - - PWx-24851: Enhance UX experience for setting up clusterpair for async-dr setups - - - query cluster pair token using px endpoint + port - - query port by looking at px-api service rest port - - Signed-off-by: Ram - -commit f7504fc6481ec72836f12b45cdba1f2d7200bf5e -Author: Ram -Date: Tue Aug 9 18:20:24 2022 +0530 - - Register and handle Resource Transformation events via controller - - - validate specs for resource transformation cr - - apply patch on unstruct k8s objects - - run patched resources on dry run namespace with DryRun option set to - all - - Signed-off-by: Ram - -commit d88159e3c4fb7280d2dcb9366f48663e40863dd6 -Author: Ram -Date: Mon Aug 1 15:14:30 2022 +0530 - - PWX-24976: Register ResourceTransformation CR api - - Signed-off-by: Ram - -commit 33f1d74c6b17890b6ab028b28ca45cc2e5ad715a -Author: Ram -Date: Mon Aug 1 14:41:51 2022 +0530 - - codegen generated file for resource transformation CR - - Signed-off-by: Ram - -commit 60d0c119d70efb14b7471869a8ec4073eb5bc1ab -Author: Ram -Date: Mon Aug 1 11:49:50 2022 +0530 - - PWX-26033: Dont include FA/FB device for migration - - Signed-off-by: Ram - -commit eeb4e468b4a5b30428615ab57dc0388f1c21a254 -Author: Luke Pitstick -Date: Fri Aug 5 14:08:53 2022 -0600 - - hold off on clusterpair port changes - - Signed-off-by: Luke Pitstick - -commit e7f47e6ad9ba6887719c0eb75eb29427a8f22f10 -Author: Luke Pitstick -Date: Thu Aug 4 16:56:37 2022 -0600 - - Vendor update openstorage - - Signed-off-by: Luke Pitstick - -commit 4f0688cc42c91656ae13c1648bc99d039c4d5305 -Author: Luke Pitstick -Date: Tue Aug 2 19:31:51 2022 -0600 - - Deal with clusterpair ports later - - Signed-off-by: Luke Pitstick - -commit 3949621aec5bfbc197c9096d6f97a9c156fc5ea7 -Author: Luke Pitstick -Date: Fri Jul 29 14:50:54 2022 -0600 - - vendor openstorage - - Signed-off-by: Luke Pitstick - -commit 393d44c93c5d34d4e53fde9a048bd3c423413c2c -Author: Mudassir Latif -Date: Tue Apr 20 02:09:28 2021 +0000 - - Stork should use the new secure port - - If tls is enabled, use tls.config generated by the openstorage - library helper - - Signed-off-by: Mudassir Latif - -commit c83f1e7b3f5c0e00f916fc018ce5f81bf52f140b -Author: Priyanshu Pandey -Date: Wed Aug 17 00:09:57 2022 -0600 - - PWX-26330: Disable px-object-controller by default. - - Signed-off-by: Priyanshu Pandey - -commit 27c04a2ab7eee15bdd4f09dbee8c6eda66e4aad6 -Author: sivakumar subraani -Date: Sat Aug 6 13:02:05 2022 +0000 - - pb-3000: Added debug statement in GetObjLockInfo api - -commit 30cda4f7559413a7b355231291aa12de51c04ae9 -Author: Lalatendu Das -Date: Wed Aug 10 07:38:04 2022 +0000 - - pb-3002: call v1 version CRD API for k8s 1.22 or more. - - Fixed a v1beta1 based getCRD call which will fail for k8s-1.22 or more - because these APIs are removed in k8s-1.22 onwards. - - Signed-off-by: Lalatendu Das - -commit 09d16ab442dc1074ae7fcb8fb349976355f2d25c -Author: Priyanshu Pandey -Date: Wed Aug 10 13:43:30 2022 -0600 - - PWX-26225: Error in starting px-object-controller should not throw fatal error. - - Signed-off-by: Priyanshu Pandey - -commit 1fbe01b49d18a03b33708bd38260954fa1cf1a97 -Author: Priyanshu Pandey -Date: Thu Aug 4 18:22:26 2022 -0600 - - PWX-26049: Vendor updated px-object-controller to fix cache initialization, delete error and multitenancy. 
- - Signed-off-by: Priyanshu Pandey - -commit 914167074236981785d8877f228248bcaf8bdce3 -Author: Priyanshu Pandey -Date: Thu Jul 28 19:34:28 2022 -0600 - - PWX-24682: Fixing static check issues. - - Signed-off-by: Priyanshu Pandey - -commit 0961a8a9baf9e4db540022dea6dd92ecd5e03ac0 -Author: Priyanshu Pandey -Date: Thu Jul 28 15:18:45 2022 -0600 - - PWX-24682: Vendor px-object-controller and start it to use px sdk server. - - Signed-off-by: Priyanshu Pandey - -commit c55ae997e904d27186195ffc9aabb27621c89321 (nfs-feature-master-branch) -Author: sivakumar subraani -Date: Thu Jul 21 08:05:49 2022 +0000 - - pb-2279: Added fixes to take care new EncryptionV2Key variable in - backuplocation. - - - Replace reference to EncryptionKey to EncryptionV2Key - - If decrypt function, assume it to be uncrypted and try using - data directly. - -commit 114ffbf85f353c2e5ffac0e576ce9ebfd1e9352b -Author: sivakumar subraani -Date: Tue Jul 12 05:46:26 2022 +0000 - - pb-2279: Added new variable for encryption key in backuplocation CR definition - -commit a35d4636f8aa5c7fe5bf335eb339a77887f994a6 -Author: Rohit-PX -Date: Wed Jul 20 23:05:16 2022 +0000 - - Rename webhook tests to be picked as part of extender tests - - Signed-off-by: Rohit-PX - -commit 221afd7223485afd9ab6db39dad999a0efd50de3 -Author: Neelesh Thakur -Date: Fri Jul 15 19:06:12 2022 -0600 - - PTX-10293: added tests for the webhook changes for virt-launcher pod - - Tests to verify that the virt-launcher pod will return "nfs" as - the file system type for regular and encrypted PX volumes. - - We use "kubevirt.io: virt-launcher" label to simulate the virt-launcher pod. - - Also, verify that the pods without the virt-launcher label will return - the correct FS type depending on it is a bind-mount or an nfs-mount. - - Signed-off-by: Neelesh Thakur - -commit 5c3c9b63fa1d89a41c212aef032447a9e435f910 -Author: Aditya Dani -Date: Fri Jul 15 16:27:08 2022 -0700 - - Add missing rancher labels to Service spec in rancher - -commit dd549f812cdb5bc38627ac24c18468684e480215 (origin/master_nfs_upload) -Author: Ram -Date: Tue Jul 5 21:06:49 2022 +0530 - - vendor updates for torpedo,schedops libs - - Signed-off-by: Ram - -commit 5d079db193a9592e1d838d557adc4cf133ca2688 -Author: Ram -Date: Tue Jul 5 21:05:04 2022 +0530 - - integration test for migration of endpoints, networkpolicy resource - - Signed-off-by: Ram - -commit ef512411ebc0467cb3f2ae9b3f33ca81c3eef4c4 -Author: Rohit-PX -Date: Tue Jul 12 23:40:08 2022 +0000 - - Vendor updates - - Signed-off-by: Rohit-PX - -commit c4447251aa35ac43c3db09eadb504fa2fb3a3ff9 -Author: Rohit-PX -Date: Fri Jul 8 18:12:37 2022 +0000 - - Add CBT suite to be run for every check-in to stork - - Signed-off-by: Rohit-PX - -commit 3e79dc8074385c0c40973549f6eb666fb8f950f6 -Author: Neelesh Thakur -Date: Wed Jun 29 13:07:11 2022 -0600 - - PWX-24637: mutate the virt-launcher container to intercept statfs() - - Live migration of KubeVirt VM fails if the VM is using - a bind-mounted sharedv4 volume. The root cause is that libvirt - uses a statfs() call to check the file system type and - incorrectly concludes that the volume is not shared. - - This patch addresses this problem as described below. - - We use a shared library px_statfs.so that intercepts libvirt's statfs call. - If the input path is backed by a PX volume, we change the file system - type returned by the real statfs call. This shared library is bundled with - the stork container. 
- - If a virt-launcher pod is being created and is using a PX volume, stork's - mutating webhook creates a ConfigMap in the pod's namespace. - - This configMap has 2 keys that represent the 2 files that we want to inject - into the virt-launcher container's /etc directory: - - - ld.so.preload - - px_statfs.so - - The stork webhook then mutates the virt-launcher pod's spec to mount - the configMap as a volume and inject the 2 files above in /etc dir on the - container's file system. This makes linux load px_statfs.so in the libvirt - process that is running inside the virt-launcher container and intercept - libvirts' statfs() call. - - Signed-off-by: Neelesh Thakur - -commit a0abc0790f8c47a61c7d985e6050ae5402eb4b74 -Author: sivakumar subraani -Date: Mon Jul 11 09:45:20 2022 +0000 - - pb-2904: vendored latest kdmp repo changes. - -commit 7cb8bb703e541c973d34dd9dec744b3cafac38b2 -Author: Ram -Date: Mon Jul 4 13:32:25 2022 +0530 - - Collect manually created endpoint resources for backup and migration - - Signed-off-by: Ram - -commit 02e473c7ac2d8735d3946db626811e92aef16bce -Author: Ram -Date: Fri Jul 1 09:46:49 2022 +0530 - - add support for endpoint object collection - - Signed-off-by: Ram - -commit 7bc6cb9cd0b1a5b3f619541ae6fe99d2f0a9b6e7 -Author: sivakumar subraani -Date: Sat Jul 9 03:48:16 2022 +0000 - - pb-2903: Added error handling for GetObjLockInfo api for cloudian - objectstore. - - - In the case of cloudian objectore, GetObjectLockConfiguration - api was returning ObjectLockConfigurationNotFound as error. - - So added the error check to handle ObjectLockConfigurationNotFound error as well - -commit 0dc4b5c3f9e51882f4aaa6422ed22c61db97db0e -Author: Aditya Dani -Date: Tue Jun 7 09:47:29 2022 -0700 - - PWX-24046: Add PlatformOptions to ClusterPair spec. - - PlatformOptions allow users to specify any configurations required for - kubernetes platform providers like Rancher / EKS / AKS etc - - Currently the PlatformOptions are only being used for Rancher. Each platform can define their - own spec within this PlatformOptions. - - RancherSpec: - - ProjectMappings: Allows users to configure source and target cluster projectID mappings - so that stork can apply the correct annotations on the destination project where it - is migrating the k8s resources. - - RancherSecret (FUTURE): Proposal for specifying a kubernetes secret to specify rancher api keys that would - be used to send REST APIs to Rancher endpoint for creating/deleting/getting project details - - NetworkPolicy / All Affinity referencing objects - Deployment/StatefulSet etc - - Parse a pod spec and if NamespaceSelectors are set, check if there are any rancher project labels - and replace them with the target project ID mapping. - - Pass resource collector options in every resource collector API - - - The resource collector instance is a common instance used in backup - and migration controllers. A common instance cannot dictate the options - used for different migration or backup objects. - - - Instead of a global map on the resource collector options use an actual - Options object and pass it as an argument to every API. 
- - PWX-24046: Integration test fixes to support Rancher Project Mappings - - - Currently the tests will run on vanilla k8s clusters but the - specs are simulated as if they were created for Rancher cluster by applying - project labels to them` - -commit f2ecc587a5498c509539dd7a017a649d53af74df (origin/pb-2279-new) -Author: Ram -Date: Thu Jul 7 19:24:59 2022 +0530 - - Remove deprecated go lint check - - Signed-off-by: Ram - -commit 6f3d4011761594a84e3f807aff737b9631cca195 -Author: Ram -Date: Tue Jun 14 18:55:20 2022 +0530 - - add options to collect all network policy - - Signed-off-by: Ram - -commit fc773c955dd296224b1c91a6c4d0193a36fc6131 -Author: Rohit-PX -Date: Wed Jun 29 00:23:44 2022 +0000 - - Add namespace to context so that restore gets created in the right namespace - - Signed-off-by: Rohit-PX - -commit 2b41fb3302fac2987e1033aba84d13ddba25086d -Author: Lalatendu Das -Date: Mon Jun 27 16:42:02 2022 +0000 - - pb-2867: fix restore path issue for CSI backups - - - fix the variable scope related issue in the restore logic of CSI backup. - - changed the switch case to have right type assertion check - - Signed-off-by: Lalatendu Das - -commit 567bac837623c1cf6d4c2384b68a909519e8f382 -Author: Lalatendu Das -Date: Fri Apr 8 16:01:50 2022 +0000 - - pb-2037: Making snapshot timeout configurable - - Added a configMap to change the local-snapshot timeout period. - This helps in addressing certain version of OCS cluster issues - wherein it takes genuinely more time - - Signed-off-by: Lalatendu Das - -commit 7c76e059bbd14a00086fcafa5d7f0a0e3ef64a72 -Author: Ozgur Gul -Date: Mon Jun 20 12:24:55 2022 +0100 - - Removed apos - -commit 03127f2704d19dbeb8cfaf71159054f54c97a282 -Author: Lalatendu Das -Date: Wed Jun 15 16:23:06 2022 +0000 - - pb-2836: Fixing some typecasting error related to v1beta1 volumesnapshot - - Wrong typecasting causes crash while duplicating backup and executing - other backup operations. Fixed by changing to correct typecasting. - - Signed-off-by: Lalatendu Das - -commit e1af2420fa3c94c43df760073ee4870b1fadbb7b -Author: Lalatendu Das -Date: Mon Jun 13 04:11:02 2022 +0000 - - pb-2328: vendored kdmp changes - - Vendored latest kdmp for supporting v1 & v1beta1 volumesnapshot CRD - - Signed-off-by: Lalatendu Das - -commit 78361560475e6722100b36d1c319f28154c00a0b -Author: Lalatendu Das -Date: Mon Jun 6 10:30:07 2022 +0000 - - pb-2328: Support both v1 & v1beta1 version of volumeSnapshot - - - Added support for V1 VolumeSnapshot APIs which is GA from - kubernetes 1.20 version. - - Signed-off-by: Lalatendu Das - -commit 7c9d3c0c2d63b57e606841ea0d3a2b86be227a7e (origin/master_sched_restore) -Author: Aditya Dani -Date: Tue Jun 7 19:16:00 2022 +0000 - - PWX-24000: Treat pure backend block volumes as Portworx volumes. - -commit 2836033676a1a6a205e495abffa0cf146342e37d -Author: Jindrich Zak -Date: Wed May 25 13:38:09 2022 +0200 - - Extract populating podNames. - -commit 879e0d32d1302a72c177cb859826eaefe74e2aff -Author: Jindrich Zak -Date: Wed May 25 13:06:37 2022 +0200 - - Improve variable names and comments. - -commit b83100db2f7204d12aee27c7830cfb8a959f4692 -Author: Jindrich Zak -Date: Mon May 23 17:05:14 2022 +0200 - - DS-2051: Add selector and namespace args. 
- -commit d38953726d0051f71e25af910357b9279e3b3dd3 -Author: Ram -Date: Tue Jun 7 21:46:08 2022 +0530 - - PWX-24245: Register VM object with suspendOption - - Signed-off-by: Ram - -commit 17183ee239b22f49faa79e67f076f66e7780bec1 -Author: Ram -Date: Mon Jun 6 22:57:06 2022 +0530 - - Do not list all k8s resources for volumeonly migration - - Signed-off-by: Ram - -commit dbe85f5ebd61b81ea9e7525590d5696afb619966 -Author: Luke Pitstick -Date: Fri Jun 3 10:26:32 2022 -0600 - - PWX-24190 Fix elapsed volume migration time when completed - -commit 19a108754d893579aa5fbadc8cabf23fef3a8532 (origin/siva-hack) -Author: ram-infrac <35250819+ram-infrac@users.noreply.github.com> -Date: Wed May 25 09:48:53 2022 +0530 - - Revert "support for k8s 1.22 kube-scheduler" - - This reverts commit e894216546860d5045d61a9de6e6bbcc7f1907c8. - -commit be647da3bb6dd51030be83ffcc6fa01f67254013 -Author: ram-infrac <35250819+ram-infrac@users.noreply.github.com> -Date: Wed May 25 09:48:53 2022 +0530 - - Revert "add namespace resource permission for stork-scheduler clusterrole" - - This reverts commit 506827a08711999512d473702116884e93a00a94. - -commit 2ee1892bf48a8d67324799fe58d68dc0c66507ac -Author: sivakumar subraani -Date: Mon May 23 10:03:24 2022 +0000 - - pb-2375: Fixed issue in handling the return value of GetObjectLockConfiguration call from FB and Dell ECS objecstore, when bucket is not enabled with lock - -commit 909dd2bfec71148ec982a3900a19682bc1d6f8e0 (origin/pb-2377) -Author: Ram -Date: Mon May 9 18:51:20 2022 +0530 - - PWX-23656 Support for removing field during activate/deactivate migration - - Signed-off-by: Ram - -commit 9f9672e04a290190cc499ee618740dbb4bb2a845 -Author: Ram -Date: Mon May 9 18:49:27 2022 +0530 - - PWX-23579: parallelize application activation/deactivation - - Signed-off-by: Ram - -commit 59d86634e3ede548e63a15d4f1118c6f3a6000c5 -Author: Serhii Aheienko -Date: Fri May 13 10:02:31 2022 +0300 - - cmdexecutor: use a unique wait.sh script name per a command - -commit 6dfb6c8797454cb24b38ccfa00c05e3813c5e579 -Author: root -Date: Thu May 12 05:20:15 2022 +0000 - - Update vendor - -commit 930954abd922ef4927bf18f2f0d9a670358dc313 -Author: Aditya Dani -Date: Wed May 11 22:11:43 2022 -0700 - - PWX-22627: Add support for IAM role in BackupLocation - -commit ff41727c85c3b116cfa8c771c534694c2242318b -Author: Andrei Kvapil -Date: Thu May 12 22:15:57 2022 +0200 - - linstor: support for WaitingForFirstConsumer - -commit b0d15e0142aaf54801914df04545c67ef61994fb -Author: Luke Pitstick -Date: Thu May 12 11:11:33 2022 -0600 - - PR feedback and testing - -commit 5cfeb4a07ad890495393e55eba8171ee2e796095 -Author: Luke Pitstick -Date: Wed May 11 15:40:01 2022 -0600 - - PWX-23693 Allow scheduling of pods with pending pvcs due to WaitForFirstConsumer - -commit c5f1df8c0627b8d145811d3d625ca38183320691 -Author: Ram -Date: Wed May 11 17:23:59 2022 +0530 - - pwx-23582: VM object migration support - - pwx-23657: link datavolume and pvc object during migration - pwx-23658: avoid ownerref resources for vm object - - Signed-off-by: Ram - -commit 5e8179bd1548d4ff7309270b98fe36c51ba4cc0d -Author: Aditya Dani -Date: Wed May 11 11:34:59 2022 -0700 - - PWX-23703: Explicitly pass migration taskID to get the CloudMigrateStatus for Portworx driver - -commit 506827a08711999512d473702116884e93a00a94 -Author: Ram Suradkar -Date: Mon Nov 29 14:22:37 2021 +0000 - - add namespace resource permission for stork-scheduler clusterrole - - Signed-off-by: Ram Suradkar - -commit e894216546860d5045d61a9de6e6bbcc7f1907c8 -Author: Ram Suradkar 
-Date: Fri Nov 26 10:53:23 2021 +0000 - - support for k8s 1.22 kube-scheduler - - Signed-off-by: Ram Suradkar - -commit 3fce175ced10464c5bea9336ad0b258322cfd7c5 -Author: Rohit-PX -Date: Tue May 3 07:43:25 2022 +0000 - - Skip deleted namespaces from a migration schedule - - Signed-off-by: Rohit-PX - -commit 3cce9702fa805ef6d3f0f910d91fd2cc20ba8a8c -Author: sivakumar subraani -Date: Wed May 4 08:00:39 2022 +0000 - - pb-2081: Added retry logic to wait for the volumesnapshot status update, before accessing restoreSize. - -commit fdce2656c89e85bbc99bd7228fa65bc1bc270fb6 -Author: Lalatendu Das -Date: Tue May 3 10:15:49 2022 +0000 - - pb-2371: Handle non-existent bucket scenario for object-lock - - - Handle error code "NoSuchBucket" while fetching object lock info for a - AWS S3 based bucket. - - Fixed go version to eliminate travis build failure. - - Signed-off-by: Lalatendu Das - -commit 80663f52424749aeb42d8511bbabd2ff95263bcc -Author: sivakumar subraani -Date: Mon May 2 10:42:07 2022 +0000 - - pb-2081: In kdmp case, resetting the Datasource field of pvc for restore. - -commit fc9a311052adbb9f0b4c003cd53113fd04725835 -Author: Ram -Date: Wed Apr 27 19:02:05 2022 +0530 - - PWX-23581: print stork leader deatails in logs - - Signed-off-by: Ram - -commit 4fc84928a3670580addc5286a0d9014ca53bacdc -Author: Ram -Date: Wed Apr 27 21:59:38 2022 +0530 - - [portworx] update delete api calls with context - - Signed-off-by: Ram - -commit 37724f3f4b33c6a97dd7e9c71e5c4d982c03d9d7 -Author: Ram -Date: Wed Apr 27 21:58:50 2022 +0530 - - openstorage vendor updates - - Signed-off-by: Ram - -commit 46e4721a492f3a1a452d3debf9ed1c8927272d74 -Author: diptianjan -Date: Thu Apr 28 17:14:55 2022 +0530 - - PB-2365: Use pvcUID instead of name while creating dataexport CR during generic backup cleanup. - -commit 4a7ba30a06ef04bde1cf7fe7b1e72b8b358d2cac -Author: sivakumar subramani -Date: Tue Apr 26 04:48:40 2022 -0400 - - pb-2360: Using GetStorkPodNamespace api for cmdexecutor image extraction as well. - - - This take care of the usecase even if stork is deployed on non kube-systen namespace - -commit e9c353b483189926a742b1b44a1021512522b7c1 -Author: sivakumar subramani -Date: Tue Apr 26 01:10:22 2022 -0400 - - vendoring latest kdmp changes from master branch - -commit 74d55834aed7dc76e43572351fa3da3059e0c778 -Author: sivakumar subramani -Date: Sun Apr 24 04:50:48 2022 -0400 - - pb-2330: vendoring latest kdmp repo from master branch - -commit d535c7ad80183298d903720b4c1d36f4881b65cd -Author: sivakumar subramani -Date: Sat Apr 23 17:48:03 2022 -0400 - - pb-2330: remove the portworx repo name from default defaultCmdExecutorImage to support custom repo - -commit 67a73e91b0b4ae888a75c379bd1f4e9951b7413b -Author: sivakumar subramani -Date: Sat Apr 23 01:17:49 2022 -0400 - - pb-2330: Made fix to support image name to have custom repo as well for cmdexecutor image - -commit da41de4d9b056272fc0e45d64708c7435ae31493 -Author: Lalatendu Das -Date: Thu Apr 21 13:45:57 2022 +0000 - - pb-2324: CSIDriver V1 version API needed for k8s 1.22 and beyond - - CSIDriver V1beta1 API support is removed from k8s1.22 verson. - This caused certain DS to initiliazed nil and caused crash for CSI based - backup. Added adequet check and called appropriate APIs of CSI driver. 
- - Signed-off-by: Lalatendu Das - -commit 5b345b98e8eee3aebde6e8c3050a5d726f81c93b -Author: Rohit-PX -Date: Fri Apr 15 13:18:16 2022 -0700 - - Explicitly pass token to generate cluster pair method - - Signed-off-by: Rohit-PX - -commit bb2f19719160564e8d44114cad8d4b10027dd106 -Author: Lalatendu Das -Date: Thu Apr 21 04:56:29 2022 +0000 - - pb-2298: minimum retention period added to error msg of failed backup - - When a scheduled backup failed due to insufficient retention period then - user need to be aware of minimum retention period to be set via error - message. - - Signed-off-by: Lalatendu Das - -commit 192d0a46a62a52b0c1b9b71efe507bc48f57c0a9 -Author: Lalatendu Das -Date: Wed Apr 20 17:11:56 2022 +0000 - - pb-2325: Set retention period for object-locked Failed backup - - The scheduled failed backup which is created for object-locked bucket - need to set retention period appropriately. This helps px-backup to - delete them when auto-delete flag enabled in px-backup. - - Signed-off-by: Lalatendu Das - -commit 4e0ceaba2bc7b0404a18203f51247a802ad4f0c7 -Author: sivakumar subramani -Date: Wed Apr 20 04:13:05 2022 -0400 - - pb-2323: vendoring latest kdmp repo from master branch. - -commit 7f25dd83e0ff21878cc6af64d921c92256e8fd1d -Author: sivakumar subramani -Date: Sun Apr 17 04:35:06 2022 -0400 - - pb-2292: vendor kdmp repo from master branch - -commit 22745ed2d3f0e64f91cc75b98b693ca57cead250 -Author: sivakumar subramani -Date: Fri Apr 15 13:21:46 2022 -0400 - - pb-2293: add logic to extract registry that container extra directories - with in it for rule cmd executor. - -commit 3c18fd21ed95d6fac6840459b45c64bc799fadbe -Author: Kesavan Thiruvenkadasamy -Date: Thu Apr 14 13:52:51 2022 +0530 - - Modify to use helper method from component helpers package - - Signed-off-by: Kesavan Thiruvenkadasamy - -commit 94aaa8e65871f3dd01d6098797522f61da796ab0 -Author: Kesavan Thiruvenkadasamy -Date: Thu Apr 14 13:52:11 2022 +0530 - - Fix tests due to change in GetClusterPairingInfo parameters - - Signed-off-by: Kesavan Thiruvenkadasamy - -commit b74d6b851d331c4b4c3d5694fe887bc8a41ee30b -Author: Kesavan Thiruvenkadasamy -Date: Thu Apr 14 12:53:22 2022 +0530 - - Update vendor files - - Signed-off-by: Kesavan Thiruvenkadasamy - -commit a10cbd1a3b0eb50e9c3f5427e5f7cf9a07374439 -Author: Kesavan Thiruvenkadasamy -Date: Thu Apr 14 10:36:37 2022 +0530 - - Update go.mod to support kdmp vendoring changes - - Signed-off-by: Kesavan Thiruvenkadasamy - -commit 20b65fd654eb7e9292ca7b901948a8b2053a8161 -Author: Luke Pitstick -Date: Tue Apr 12 14:48:51 2022 -0600 - - PWX-22676 wait for extender to be ready - -commit 314500a3005a8ee8049b1a41e9cd252ac72d4c72 -Author: sivakumar subramani -Date: Mon Apr 11 17:47:33 2022 -0400 - - pb-2288: ebsVolume.Size is size in GiB, So converting it to bytes. - -commit e37c9a9167c803d8ab58516a3beb11f8711c8b9d -Author: diptiranjanpx -Date: Thu Mar 31 10:13:16 2022 +0000 - - [PB-2270]: Initializing cred values from backuplocation only if cluster secret is provided. - -commit 7cc7d37aee9ff9ab8ecf41dd738382e60246c8d4 -Author: sivakumar subramani -Date: Thu Mar 31 16:43:53 2022 -0400 - - pb-2266: Adding backupType in the applicationbackup CR created by - schedule backups. - - - Added fix in Makefile to address the staticcheck failure. - -commit 239ea6d7b67b7639c79068f8081827798f219ef1 -Author: Lalatendu Das -Date: Tue Mar 29 11:16:53 2022 +0000 - - pb-2267: fixed issue in object lock configmap name - - Changed the name of object lock configMap from stork-objLock-config to - stork-objlock-config. 
This is to avoid any capital letter in it. - - Signed-off-by: Lalatendu Das - -commit 856c442b010aa198927172c5b1db598d93945722 -Author: Ram Suradkar -Date: Tue Mar 29 06:54:05 2022 +0000 - - Upgrade openssl libs - - Signed-off-by: Ram Suradkar - -commit 55772e1191e22bfdaf786e8df3119b7d2c06b32b -Author: diptianjan -Date: Wed Mar 16 23:04:40 2022 +0530 - - [PB-2194]: backuplocation object to have crdential info associated with the cluster. - -commit f2a6dae7ef2862ebe1a12aa9ba50bc0baf134a31 -Author: Lalatendu Das -Date: Sun Mar 27 20:35:09 2022 +0000 - - pb-2259: Fail the schedule-backup for an invalid retention period - - - Created a configMap related to object lock which can be used for altering - the incremental count of schedule backups. - - Implemented the following logic, if the retention period is altered in - between two scheduled backup, then backup will fail for an invalid - retention period - - Signed-off-by: Lalatendu Das - -commit 6f84565113d2f75bb868e6b13a250d15a2774972 -Author: sivakumar subramani -Date: Wed Mar 23 15:43:16 2022 -0400 - - pb-2260: Return default objLockInfo struct instead of error for gke and azure bucket in GetObjLockInfo - -commit 6d25cd654c5f4e4308e885fb87b343418146fbf5 -Author: Ram -Date: Mon Mar 21 19:59:50 2022 +0530 - - Check empty RemotepairID before deleting clusterpair - - Signed-off-by: Ram - -commit 8b23fb4c6900f6d5a8ea7ee0974ecae1a634efa3 -Author: Ram -Date: Wed Mar 16 22:42:56 2022 +0530 - - PWX-23244: Allow pods using FA/FB pvcs in scheduling decisions - - Signed-off-by: Ram - -commit 4dc0126f48f84b0b06267bf2b25e6769eca26650 -Author: sivakumar subramani -Date: Sun Mar 20 13:55:22 2022 -0400 - - pb-2236: Added logic to force full backup for locked period backup from schedulebackup - - - Starting of every day time slot, forcing to have first backup of schedulebackup to - full backup always. - - This is to take care, if the incremental backup was overlapping between two day's - time slot and avoid having incremental backup as first backup in starting of day. - -commit 035c2a6f1f1c212e8d5b849d73fc8c7eae42aaa8 -Author: sivakumar subramani -Date: Thu Mar 17 14:02:28 2022 -0400 - - pb-2241: fixed a nil pointer access in GetObjLockInfo function. - -commit 12a3debbfa7fdaa597e2f41959e8d6f81e854118 -Author: Luke Pitstick -Date: Tue Mar 15 02:04:11 2022 -0600 - - PWX-23212 Treat StorageDown nodes as degraded instead of Online - -commit 0f598651a156e0cc45d0e42ea045af73d7f377ac -Author: Ram -Date: Wed Feb 16 22:36:25 2022 +0530 - - Upgrade torpedo scheduler calls - - Signed-off-by: Ram - -commit aa8760e4ab2bebe5dd4f6dd05e7731259deb42a7 -Author: Ram -Date: Wed Feb 16 22:34:42 2022 +0530 - - Update px vendor libs - - - torpedo - master - - openstorage - master - - Signed-off-by: Ram - -commit a9c84cc30c0026ce126ce478858c2f2e2190431f -Author: Ram -Date: Tue Mar 15 12:29:12 2022 +0530 - - PWX-23231: Ignore completed migration while checking inactive cluster domain - - Signed-off-by: Ram - -commit 0413d4fc78d4c2a0469c6cc15cbef5b481ec3dc5 -Author: Ram -Date: Fri Mar 11 00:12:16 2022 +0530 - - PWX-23026: Check if underlying storage pair is used by another clusterpair - - Hold deletion of clusterpair if another clusterpair exists which is using - same storagepair - - Signed-off-by: Ram - -commit 64e7ec07c1c7444155f40291d8634c500fd215b4 -Author: sivakumar subramani -Date: Sun Mar 13 01:41:38 2022 -0500 - - pb-2179: Added changes required for the supporting object lock for backupschedule deletion. 
- - - Adding the "object-lock-retention-period" annotation to the - applicationbackup CR created by applicationbackupschedule in stork - - This retention period annotation value will be used in the backupscync - logic to update the retention period of the synced backup object. - - Added GetObjLockInfo in stork objectstore as it needs to be called in - stork as well as px-backup - -commit 7cb7774a5b7eb3d10b2f9ad64528465a660ad222 -Author: Aditya Dani -Date: Sat Mar 12 02:20:28 2022 -0800 - - PWX-23215: Set the elapsed time in MigrationSummary only when required. - - - Only if a resources or volumes are getting migrated then set their - respective elapsed times. - -commit a2172901c210d8fce60381b485b256788f72a173 -Author: Aditya Dani -Date: Sat Mar 12 02:05:46 2022 -0800 - - PWX-23317: Raise an event if clusterpair not found in Migration - - Signed-off-by: Aditya Dani - -commit b9e921fcf993b440b61034019a86ad79b661f27b -Author: sivakumar subramani -Date: Thu Mar 10 00:25:33 2022 -0500 - - pb-2219: Add changes to use the registry name and image secret for kopia - executor from stork deployment spec. - - - Add GetStorkPodNamespace to get the stork pod namespace, even - if it is not installed in the kube-system. - -commit 880ec6d853bc095265ade4391e1bdcc5369f14d2 -Author: sivakumar subramani -Date: Wed Mar 9 23:23:49 2022 -0500 - - pb-2219: vendor changes for latest sched-ops - -commit 7c320e7123d4b7540b96ac0311756666f0acc315 -Author: sivakumar subramani -Date: Wed Mar 9 10:01:08 2022 -0500 - - pb-2219: vendoring latest kdmp changes - -commit 3dc2fcb4f165e2c03917d76099aef979219ad88d -Author: Ram -Date: Fri Mar 4 16:48:21 2022 +0530 - - allow to run complete integration suite - - Signed-off-by: Ram - -commit 5670d51b3f82a8c9f2de0af8a3acce2be6f41cef -Author: Luke Pitstick -Date: Wed Mar 2 10:28:19 2022 -0700 - - PD-1108 update app spec example - -commit 36fbc08b8080b8394cf42d6d6350a5e8bf18dd14 (origin/pb-2196) -Author: sivakumar subramani -Date: Sun Feb 27 10:17:31 2022 -0500 - - pb-2172: Added fixes related to handling of rule command executor image. - - - Added CMD-EXECUTOR-IMAGE-REGISTRY and CMD-EXECUTOR-IMAGE-REGISTRY-SECRET environment variable - to get the custom regitry name and secret for rule command executor pod image. - - Added the annotation to get the image registry secret value, - if some one pass the custom image registry value as annotation in the rule CR. - - Added logic to take the image registry name and secret value - from the stork deployment if the both the env and rule annotation is missing. - - Also, add a fix to fail the backup and delete the rule command - executor pod, if the rule command executor pod is struck in Pending phase for more - than five minutes. Some times, it gets struck in pending state, - if there is any issue with the image registry or secret value. - - Minor fix in printing a err in handle function. 
- -commit 7ad5271f34accb5fd7e06c5a3232cd2d84296a0c -Author: Ram -Date: Mon Feb 21 22:35:17 2022 +0530 - - [integration-test] increase nodeoffline timeout to avoid race - - Signed-off-by: Ram - -commit 5651b078a3bb7d6d76461d2d4bf5306cc6ac2839 -Author: Ram -Date: Mon Feb 21 22:34:26 2022 +0530 - - PWX-22971: application clone failing at resource stage - - - get volume driverName by looking at pv object - in case of non backup/restore prepareresource calls - - Signed-off-by: Ram - -commit 021fc3c60b10c3600d39ee2401d616e246741deb -Author: Lalatendu Das -Date: Thu Feb 24 03:03:33 2022 +0000 - - pb-2178: Obtain object-lock info for S3 bucket - - Added a function to objectstore package to extract object lock info from - a S3 bucket. - - Signed-off-by: Lalatendu Das - -commit 950cc24775fd742ea5decc2952d87868103b74da -Author: Andrei Kvapil -Date: Tue Feb 8 00:54:39 2022 +0100 - - Format log for linstor driver - - Signed-off-by: Andrei Kvapil - -commit 6479dc4086383dea472f00532ec2574db38d11f1 -Author: Andrei Kvapil -Date: Wed Feb 2 00:44:05 2022 +0100 - - Update golinstor module - - Signed-off-by: Andrei Kvapil - -commit ecb55920e63ede800b4af9492e3e7b76e7921a08 -Author: Ram -Date: Thu Feb 17 11:29:29 2022 +0530 - - pwx-22958: fix failing integration tests - - pvcResizeTest - wait for next migration schedule to trigger - driverNodeTest - Adjust pods reschdule time with updated health monitor timeout - pvcOwnershipTest - use correct storage-provisioner annotation - - Signed-off-by: Ram - -commit b9773cd69de90aa418a6d1666ce43ca3285ab92c -Author: Rohit-PX -Date: Sun Feb 13 09:35:54 2022 -0800 - - Add RabbitMQ operator migration test - - Signed-off-by: Rohit-PX - -commit e3077b75414649fbd329988a205599bc4b48a96a -Author: Ram -Date: Mon Feb 7 18:18:03 2022 +0530 - - Support for activate/deactivate nested server with CR - - Signed-off-by: Ram - -commit 97078febb7be8bf2975223af55c0f782f98d0e0b -Author: Ram -Date: Mon Feb 7 18:17:38 2022 +0530 - - Allow multiple disable path options for appreg - - Signed-off-by: Ram - -commit f6567f866498e703b4b3f77c9528c0383e1f8b6b -Author: Aditya Dani -Date: Mon Feb 14 10:17:10 2022 -0800 - - STOR-479: Add separate elapsed times for Volumes and Resources. - -commit ecbf6806f8495a73f6e570f666996318c565dac5 -Author: Aditya Dani -Date: Fri Jan 28 16:04:52 2022 -0800 - - STOR-479: Add MigrationSummary to Migration CR. - - Stork - - MigrationSummary provides a short summary on what objects got migrated as a part - of a migration cycle. It includes the following: - - total number of k8s resources expected to be migrated - - actual number of k8s resources successfully migrated - - total number of volumes expected to be migrated - - actual number of volumes successfully migrated - - total number of bytes transferred - - Portworx driver will report the total number of bytes transfered for each volume. - - The Migration summary will provide a sum of the total bytes transferred. - - Storkctl - - Add total bytes transferred column to get migrations output - - Modify UTs - - Integration Tests - - Validate Migration summary is updated in the TestMigrationFailoverFailback test. - -commit 93d78dd8cce75f056d1731d1060f160664daea56 -Author: Aditya Dani -Date: Fri Feb 11 15:30:38 2022 -0800 - - PWX-22606: Give low scores to nodes which are in Degraded state. - - - A node if in Degraded state will return half of the score they would - originally return based on the node/zone/rack/region priority. 
- - If a volume has a replica on a node but the node is in Degraded state - stork will give it a score of rackPriority / 2. This is done since - a pod running on that node is not really hyperconverged and should be - score similar to any another node in the same rack. - - Added UTs for scenarios where a node needs to be scored based on - zone/region/node/rack but is in Degraded state. - -commit c96b617b194f8b39ef53c78d7d4a0d2844de84bb -Author: Ram -Date: Mon Feb 14 13:32:13 2022 +0530 - - Update sched-op apis update changes - - Signed-off-by: Ram - -commit 25f03b2f96683c02ea3cd7e8e089fc21d77d7d81 -Author: Ram -Date: Mon Feb 14 13:31:46 2022 +0530 - - Update sched-ops vendor to master - - Signed-off-by: Ram - -commit 7565de60dc1fbd5b538520f52b7ed83fa55d49eb -Author: Ram -Date: Wed Feb 9 20:52:54 2022 +0530 - - Add autosuspend option to migrationschedule object - - - auto-suspend will allow to stop migration on src cluster - if remote cluster has activated migrated application - - fix review comments - - Signed-off-by: Ram - -commit b513b6b7e34cfde918dc5ae445cc0d4c38644def -Author: Ram -Date: Wed Feb 2 22:33:45 2022 +0530 - - integration test for auto-suspend migrationschedule - - Signed-off-by: Ram - -commit b26bf18c95b489ae40676b4e63c2ffca8cc191f3 -Author: Ram -Date: Thu Nov 11 22:32:02 2021 +0530 - - add migrationschedule name annotation to migration objects - - - Dont scale down apps automatically on primary cluster - - Check and create namespace on DR cluster for migrationschedule - - Signed-off-by: Ram - -commit b8161121037cfa530e049bec847b05634c742310 -Author: Ram -Date: Thu Feb 18 10:52:59 2021 +0530 - - Disable migration if dr site apps are active - - - stor340: Redo cluster-pairing automatically - - storkctl deactivate will also set migrationschedule appactivate field - - vendor update sched-ops - - Signed-off-by: Ram - -commit 1b6fbe92dbdb86af46d4f29a1913122647db586a -Author: Rohit-PX -Date: Fri Feb 11 03:16:53 2022 -0800 - - Torpedo vendor changes for CRD support - - Signed-off-by: Rohit-PX - -commit 44fe3c71f1c9cd6fdd2d0e54e8921c6dccdffe5f -Author: Aditya Dani -Date: Tue Jan 25 17:00:15 2022 -0800 - - STOR-484: Increase the health monitor offline node timeout to ~4 minutes. - - - The health monitor in STORK currently waits only for a minute after it detects - storage driver node as offline. Changing this timeout to ~2 minutes. Stork will - continue to poll for offline nodes every 2 minutes. So the max time a pod - running on an offline node to get deleted will be 4 minutes. - - A storage driver if restarting due to an upgrade can take upto 3 minutes and - STORK could cause an unnecessary restart to the application pods. 
- -commit 32685418fb8895c0fdfd930f3a7b22c7bc08cac0 -Author: Luke Pitstick -Date: Wed Feb 9 08:38:57 2022 -0700 - - PWX-22607 Add annotation to disable hyperconvergence prioritizing - - PWX-22609 Change webhook controller default to true - -commit c87fbac8704ba86c2b0b4b3a66ca72bdb1ce60b0 -Author: Rohit-PX -Date: Wed Feb 9 08:17:24 2022 -0800 - - Replace sysbench with fio - - Signed-off-by: Rohit-PX - -commit 27f15651b86df1fae49b7d38c255d372d36c3658 -Author: Ram -Date: Thu Feb 10 10:55:38 2022 +0530 - - Don't upload storkctl binaries to s3 bucket - - - Users can copy storkctl binary from stork pod itself - - Signed-off-by: Ram - -commit a91c1d9f36773830f0e5b5ec4ee4e36ece2fa633 -Author: Ram -Date: Mon Feb 7 18:52:53 2022 +0530 - - Update PerconaXtraDBCluster suspend option to gracefully shutdown cluster - - Signed-off-by: Ram - -commit 610beced0c6f61bb2621b63bf4172156b593c961 -Author: Ram -Date: Tue Feb 8 12:35:11 2022 +0530 - - Mark snapshot status as failed if manually deleted by user - - Signed-off-by: Ram - -commit be77c700bab9910edefe9f3495d65f7a99374df0 -Author: Ram -Date: Mon Feb 7 23:23:00 2022 +0530 - - Always sync volumesnapshotschedule and volumesnapshot status - - Signed-off-by: Ram - -commit 5911c1dfd7d2726701cd87f0cad3fda9c4ca91da -Author: Aditya Dani -Date: Fri Jan 21 11:19:53 2022 -0800 - - STOR-577: Portworx Driver: Do an explicit not found check in DeletePair - - - Change the error condition to do a string check on "not found". - -commit 58e441bdcdb6bef965754ea09e8e6c786c8594a2 -Author: Rohit-PX -Date: Tue Feb 1 05:52:17 2022 -0800 - - Separate out migration and snapshot tests and run all tests - - Signed-off-by: Rohit-PX - -commit aa43b560025eddd304b7451b83b6f92edafa81b5 -Author: Rohit-PX -Date: Tue Feb 1 06:56:02 2022 -0800 - - Migration test for mongo operator - - Signed-off-by: Rohit-PX - -commit 765e8957c86b475590e59dfa5e1f3fed8171d2df -Author: Ram -Date: Fri Jan 28 17:08:20 2022 +0530 - - integration test to validate pvc resize after migration - - Signed-off-by: Ram Suradkar - -commit cd44606a7ae9ffd35f09fd35e705849237b2f9e0 -Author: Ram -Date: Tue Jan 25 17:19:33 2022 +0530 - - keep storage class in pvc spec for migrated pvcs - - Signed-off-by: Ram Suradkar - -commit daf8f93a88ba71fbf05f65bc1273fff604124220 -Author: Luke Pitstick -Date: Tue Feb 1 15:59:11 2022 -0700 - - PR Feedback - -commit 1f7daadb5770bcab336d6531216b3417c14724f9 -Author: Luke Pitstick -Date: Fri Jan 28 09:59:31 2022 -0700 - - Refactor - -commit e2f0a0626cfa75eebc60691186bd013e0f77a74a -Author: Luke Pitstick -Date: Wed Jan 26 10:36:53 2022 -0700 - - Scramble object order - -commit 8fc1f3b8777a3da4f999ad42af635e2a2e757fa8 -Author: Luke Pitstick -Date: Wed Jan 12 11:27:22 2022 -0700 - - STOR-573 apply updatedObjects in parallel during migration - -commit 0928646b2c4cc578af379e7c307cce7180a600a8 -Author: Ram -Date: Tue Jan 25 17:49:39 2022 +0530 - - Fix CVEs reported by dependabot - - Signed-off-by: Ram - -commit 998683f66f18eba1281ad99a9e370d5dc2f163c3 -Author: Rohit-PX -Date: Fri Jan 21 03:49:54 2022 -0800 - - Add support for openstorage.portworx.io as jwt issuer - - Signed-off-by: Rohit-PX - -commit 46d20f921bfed040ec1a77395b80f8dc6de4deac -Author: sivakumar subramani -Date: Mon Jan 17 03:02:15 2022 -0500 - - pb-2162: vendor changes for the kdmp master branch - -commit 0fb31b7231a46cfafaf3b0e274b6b2cdd2278f5b -Author: sivakumar subramani -Date: Mon Jan 17 00:11:35 2022 -0500 - - pb-2162: Added fixes for backup/restore on ocp environment. 
- - Including system:openshift:scc rolebinding in the backup - - Added privileged scc for job role. - -commit 532949ca566da7252178d1d64a157e9990cc6513 -Author: diptianjan -Date: Mon Jan 17 09:29:40 2022 +0530 - - Vendoring in from latest kdmp. - -commit af871c01650a1346f6fad5e5a709581513e6069b -Author: diptianjan -Date: Sat Jan 15 09:36:27 2022 +0530 - - Addressed the review comments. - -commit 97b8e4a58046075759f4894ad5853e2d5fd133f0 -Author: diptianjan -Date: Wed Jan 12 12:38:25 2022 +0530 - - [PB-2148]: Implementing job framework to make pvc bound in case of waitforfirstconsumer case. - -commit 6a8c69b4c08ffc2607715a955ecbddadcbf74e08 -Author: Rohit-PX -Date: Tue Jan 4 07:31:23 2022 -0800 - - Integration test to delete stork pods on destination during migration. - - Signed-off-by: Rohit-PX - -commit 8ac87e611313c709628b9b84581b50302874e624 -Author: Prashanth Kumar -Date: Thu Jan 13 00:46:52 2022 -0500 - - Skipping zone checks for EFS provisioner for generic backup/restore paths - -commit a9f23af1c447c143dd2deeb09f291e3bd1cdf58f -Author: sivakumar subramani -Date: Wed Jan 12 14:29:27 2022 -0500 - - pb-2157: retaining the rolebinding subjects if the namespace is not set during restore. - -commit 40a08b489d40baa6c205f91af32ce3dc8932ee28 -Author: sivakumar subramani -Date: Mon Jan 10 02:30:20 2022 -0500 - - pb-2113: vendor change for kdmp from master branch. - -commit 6f415f0c02cf5264d7f643b28eb191219a5ad7f9 -Author: Prashanth Kumar -Date: Thu Jan 6 00:40:13 2022 -0500 - - Multi zone support for aws/gke - - As part of kdmp restore, it is made sure that kdmp job pod - and the restore PV's are created in the same zone - -commit 26cf2a3f08e89f27af4aebe3d9371a2638aaf305 -Author: sivakumar subramani -Date: Sat Jan 8 05:20:32 2022 -0500 - - pb-2156: Added logic to default to KDMP for OCP rdb and cephfs provisioner. - -commit 25a9fe73f69bf48e434754984719c3ea4109973c -Author: sivakumar subramani -Date: Fri Jan 7 02:22:37 2022 -0500 - - pb-2149: vendor changes for kdmp from master branch - -commit a872471fa9c3e1f3975bdebee27f3449239c36d1 -Author: Aditya Dani -Date: Mon Jan 3 13:39:27 2022 -0800 - - Raise an event if cluster pair delete fails. - - - Do not remove the finalizer if cluster pair delete fails from the driver's perspective. - - Retry the operation in the next reconcile loop. - -commit a89660147f853ce6903276fc23603725e085cfde (origin/pb-2011) -Author: sivakumar subramani -Date: Tue Jan 4 14:20:49 2022 -0500 - - pb-2151: Added check to make sure zone array is not empty in preparePVCResourceForApply - -commit 553ab59745655eef724d93394b2cdbe8c1d006c8 -Author: diptianjan -Date: Fri Dec 24 09:42:27 2021 +0530 - - [PB-2143]: Making pvc size same as csi snapshot size to avoid the clone failure. - -commit 40510a2cf18ea1350235a4c0e52c9600830fddba -Author: sivakumar subramani -Date: Tue Dec 28 23:01:39 2021 -0500 - - pb-2118: Added changes to support cross-region backup in native GKE - driver. - - - Update the logic take the destination zone and region in the - StartRestor function. - - Updating the PVC resource with nodeselected annotation form - the destination cluster based on the region/zone selected for - restore. 
- -commit d92c49f0fce3cf7cc767185858a3455f0b7bdfde -Author: sivakumar subramani -Date: Fri Dec 24 10:41:11 2021 -0500 - - vendor changes for kubernetes pkg - -commit 35c1a8a55fe67e0a260ccce178158bf618f7e234 (origin/siva-zone-map, origin/siva-tz, origin/siva-reconcile-dec19, origin/siva-gke-cross-reg, origin/master_aws_cross_region) -Author: sivakumar subramani -Date: Fri Dec 17 09:07:27 2021 -0500 - - pb-2131: vendor latest kdmp from the master branch. - -commit 87099e7fa665325ea1f210177a2691c3fbce3478 -Author: diptianjan -Date: Mon Dec 13 17:28:23 2021 +0530 - - PB-1079: Don't list the related clusterrole and clusterrolebinding of a serivce account - if user does not have permission for that namespace. - -commit bf7971a45ad320fda421caf8233af4abacf70812 -Author: Luke Pitstick -Date: Wed Dec 15 17:41:10 2021 -0700 - - STOR-516 only try to create a clusterpair if a token is provided - -commit 52d82cd78bbc2a53b0e4d9840153b908794f923e -Author: Prashanth Kumar -Date: Thu Dec 16 00:18:18 2021 -0500 - - Passing kdmp-config map name for kdmp job delete - -commit 97686573a69cf4ba783f1ba5a6a876274ce7ed8e -Author: Prashanth Kumar -Date: Thu Dec 16 00:18:59 2021 -0500 - - vendor update for kdmp - -commit b7e24a0a132d74859b615477cbe4fb37fab832ad -Author: diptianjan -Date: Thu Dec 16 00:22:11 2021 +0530 - - [PB-2133]: Checking for waitforfirstConsumer if pvc's storage class has that set as volumebindingmode. - -commit 43d8125b338591ad7d467b4f528396425e4be80e -Author: Prashanth Kumar -Date: Tue Dec 14 08:12:45 2021 -0500 - - As part of restore if a user selects few PVC's to be restored, filter the - non-selected PVC's from includeResource map. This is applicable for - the case where Retain is selected. - -commit fc184ff7afb8a367663ac65beb4614d91370a4e6 -Author: Aditya Dani -Date: Mon Dec 13 14:21:15 2021 -0800 - - Set the new storage class on the PVC during ApplicationRestore - - - If the storage class mapping is set on ApplicationRestore on the PVC's - source storage class is found in this mapping, then on the destination cluster where - the PVC is being recreated set the new storage class obtained from this mapping. - - Signed-off-by: Aditya Dani - -commit b928b4459c336c0b55f869a42a0a5d1841abd090 (origin/pb-2126) -Author: Prashanth Kumar -Date: Tue Dec 14 01:15:14 2021 -0500 - - vendor update for kdmp - -commit 7fc6acdc30fa79b0fdd6e50ac80e5028722a0df9 -Author: sivakumar subramani -Date: Mon Dec 13 06:37:57 2021 -0500 - - pb-2125: Added check for nil SC in getRestorePVCs - -commit b7e52e497c1cc39402b7cae9962c05843830c784 -Author: sivakumar subramani -Date: Sat Dec 11 11:35:09 2021 -0500 - - pb-2113: Setting storageclass to nil in pvc, if it is empty. - - - The default storageclass configured on the setup, will be - picked up, only when the storageclass is not set. - - Emptystring as pvc storageclass will not select - the default storageclass configured on the cluster. - -commit 3e09549170c56aa5ca4bb55ea631a1b9fd65ef98 -Author: sivakumar subramani -Date: Sat Dec 11 05:48:52 2021 -0500 - - pb-2113: vendor changes for kdmp from master branch. - -commit a8542eec3af49ed816f69080c3db05e9a71db340 -Author: sivakumar subramani -Date: Fri Dec 10 11:38:11 2021 -0500 - - pb-2116: Added kubernetes.io/azure-file provison in - csiDriverWithoutSnapshotSupport - - - moved the check to pv.Spec.CSI secion in CreateSnapshot up, - immediately after getting the pv content. - - Added a note to mention that filestore.csi.storage.gke.io gke - filestore support snapshot. 
- -commit 2d6758d5b2a104066450d69bbf8f40cb0c308daf -Author: diptianjan -Date: Fri Dec 10 11:06:59 2021 +0530 - - vendoring in latest kdmp. - -commit 4ab3a2976f98dba9a69aa313d392ea20232d35f9 -Author: sivakumar subramani -Date: Wed Dec 8 09:03:56 2021 -0500 - - pb-2110: Fixed the issue in return value of IsCSIDriverWithoutSnapshotSupport api for non-csi case - -commit 76e500ff1027b5cb781050d5e0f87752f28ee5ec -Author: sivakumar subramani -Date: Tue Dec 7 13:24:43 2021 -0500 - - pb-2101: vendor latest from master kdmp branch. - -commit 455059e68e28fd8bbc76fd59b789d4855dd116c9 -Author: sivakumar subramani -Date: Sat Dec 4 12:56:45 2021 -0500 - - pb-2098: Added google file and azure file check to default to KDMP - driver. - - - Removed IsDriverWithoutSnapshotSupport and - isCSISnapshotClassRequired. - - Added a IsCSIDriverWithoutSnapshotSupport common API in volume pkg - and adding it in kdmp and csi driver. - - Also added csiDriverWithOutSnapshotKey configmap field to get user - input for driver that does not support snapshot. - -commit 253823493dfc8193627994edd4a4fd1ab1ae4ba9 (tag: v2.8.1) -Author: diptianjan -Date: Mon Dec 6 21:14:43 2021 +0530 - - [PB-2104]: Using , as the delimiter for the composite string of volumesnapshot. - -commit 798dedbbc8fd13ff15ddaa3c18c7ef12733a3703 -Author: diptianjan -Date: Thu Dec 2 16:55:32 2021 +0530 - - [PB-2094]: Preventing readding of volumesnapshotname in the applicationrestore CR. - -commit e18d7ab585343b8f9f4d9670872e0f620dc62f42 -Author: Prashanth Kumar -Date: Fri Dec 3 07:26:25 2021 -0500 - - Fixing kdmp restore across Azure regions - - As part of the restore, fetch the driver from the vol info instead of calling GetPVDriver() - - Removed volume.kubernetes.io/selected-node annotation from the PVC spec or else Azure PVC - always tries to get bounded to the same node which might not be present - -commit ed6344395196bf16ea7ec98b326b9e745d611df2 -Author: sivakumar subramani -Date: Fri Dec 3 08:25:53 2021 -0500 - - pb-2101: Fixed the issue in isCSISnapshotClassRequired function such - that for non CSI volumes, it will directly go the kdmp backup - - - Added hostaccess scc in the backup job for ocp platform. - -commit b3ef06d0ac77aa2fcb27525c813faf7377a116ec -Author: diptianjan -Date: Mon Nov 29 20:22:45 2021 +0530 - - Vendoring in latest kdmp changes. 
- -commit 4971dde10020b335b378ab69fe3ef5f4201d7cfb (origin/pb-2065-fix) -Author: Ram Suradkar -Date: Wed Nov 24 05:14:13 2021 +0000 - - revert clsuterolebinding collection based on user access to SA - - Signed-off-by: Ram Suradkar - -commit b0a612a2c0c1562db1bf6148dabd6af8d5f790ac -Author: Ram Suradkar -Date: Tue Nov 23 18:41:36 2021 +0000 - - PB-2079: collect clusterrolebinding subjects if sa is not found - - Signed-off-by: Ram Suradkar - -commit 2844ee938a69e354583d10ec73b649042dbfd424 (origin/pb-2075) -Author: Prashanth Kumar -Date: Mon Nov 22 09:07:14 2021 -0500 - - vendor update from kdmp master - -commit 911f9b4d7a0103f24e7b296581698511aaa90778 -Author: Prashanth Kumar -Date: Sun Nov 21 14:28:31 2021 -0500 - - Skipping namespaces which are not part of restore ns mapping - -commit d5f1968e0c6ccd80472e968d3e35f904464c4ee0 -Author: Prashanth Kumar -Date: Fri Nov 19 12:59:55 2021 -0500 - - Adding env variable to specify market place deployment - - user can set env MARKET_PLACE=aws to specify the stork being deployed on - AWS marketplace so that appropriate kopia executor image is picked from - market place repository - -commit 996b135ce10ee46602d1d6b3ed64259c0afa7659 -Author: Aditya Dani -Date: Thu Nov 11 03:51:58 2021 -0800 - - STOR-528: CSI VolumeSnapshotClass fixes - - - If the SnapshotClassName is set as "default" or "Default" - let Kubernetes handle the snapshot class selection. It wil use - the default snapshot class set by the admin for that CSI provider. - - - Do not create stork-* volume snapshot class on startup if a snapshot - class for the CSI driver already exists. - -commit 3b570e260980728db57a736cfb38ad880c25c929 -Author: Prashanth Kumar -Date: Sat Nov 20 08:54:20 2021 -0500 - - vendor kmdp master changes - -commit 102cb76fa35c17c3814ea8397651361b9425f6b8 -Author: sivakumar subramani -Date: Fri Nov 19 13:20:30 2021 -0500 - - pb-2061: removed the incorrect error handling while calling - runtimeclient.ObjectKeyFromObject function. - -commit f26546d3c2e02365ed290cf918c5b67ed7c0daae -Author: diptianjan -Date: Sun Nov 14 23:30:03 2021 +0530 - - [PB-2010]: support for replace policy in generic restore. - Volume status to become RETAINED if replace policy is set to retain. - -commit 621a7a06d7f89eac70c91069c0faff2b594dbc9f -Author: sivakumar subramani -Date: Wed Nov 17 07:49:54 2021 -0500 - - pb-2036 Resetting restore PVC's PV to nil irrespective of storageclass map presence. - -commit c80e6a289a46f35565a93a5f3d2af31790c5ee5c -Author: sivakumar subramani -Date: Tue Nov 16 13:29:10 2021 -0500 - - vendor changes for kdmp repo from master branch - -commit 57ffbba72a69f1ae55c735492da092ee6d636b3a -Author: Aditya Dani -Date: Wed Nov 17 17:07:22 2021 +0530 - - Change the pull request template - -commit f34c13b723e3182564f383a7f2c4585a20ed05c4 -Author: diptianjan -Date: Mon Nov 15 21:55:59 2021 +0530 - - [PB-2030]: pv/pvc should be included as resources in kdmp restore list. - -commit 99e44c8183308007b8093395b9207fcabcf9cc5f (origin/pb-2016) -Author: Aditya Dani -Date: Thu Nov 11 02:54:13 2021 -0800 - - STOR-556: OCP Migration improvements. - - - Do not collect DeploymentConfigs owned by operators. - - Do not collect openshift-service-ca.crt config map created by - Openshift in every namespace. 
- -commit 738d7529eb9fb0531fae14ea7973276e1f8e2f05 (origin/pb-2023) -Author: sivakumar subramani -Date: Sun Nov 14 15:56:02 2021 -0500 - - pb-2020: Added check in assigning SnapshotStorageClass in dataexport CR - - - Assigng the SnapshotStorageClass in DE CR only when - LocalSnapshotRestore is true, in which case, we will try the - local snapshot restore. - -commit 6421f416e268a3e57c6eaed86a304f644deb2666 -Author: diptianjan -Date: Sat Nov 13 23:49:36 2021 +0530 - - Vendoring in latest kdmp changes. - -commit e02f28e240e975461486adac2fa2e982db5152ef -Author: diptianjan -Date: Sat Nov 6 00:07:47 2021 +0530 - - [STOR-513]: support for local restore for csi volumes. - -commit dd2092c53a724c1253220130ff37a97949549cd8 -Author: sivakumar subramani -Date: Fri Nov 12 16:47:05 2021 -0500 - - pb-2015: remove the storage-class and storage-provisioner annotation - from pvc while restore it. - - - Even though we updated the spec.storageclass of the pvc to the - new storage class from the storage class, while restoring - ,it was not getting bound. The k8s was referring to the old - storage class from the annotation and pvc was struck in - pending state. - -commit 9718d1ff9852f17595374d77b74296cbb3ae6118 -Author: sivakumar subramani -Date: Fri Nov 12 21:24:17 2021 -0500 - - pb-2003: added ordered way of classifying the driver in GetPVDriver api - - GetPVDriver is used in the resource restore path and we try to - avoid CSI pvc applied again. Since ordered list is not used in - this function, some it ended up in kdmp driver and ended in - partial success. - -commit 347ffbf96bb9d52708bc6a37afdaaaf8bc1198ab -Author: diptianjan -Date: Thu Nov 11 23:07:41 2021 +0530 - - Fixing volumesnapshot name and handling already exists for volumesnapshot and content. - -commit 0775ca36730abf3a638a2e493fa00c917e817962 (origin/pb-2009) -Author: diptianjan -Date: Wed Nov 10 11:11:27 2021 +0530 - - Putting the volumesnapshot class and backupUID in volume info as these are required in restore. - -commit 3b1295efc88bfd9abffd4ea800af36f7a253e732 (origin/stor-514) -Author: diptianjan -Date: Mon Nov 8 18:06:12 2021 +0530 - - vendoring in latest kdmp changes. - -commit c7d81c69e91f69634a531dfc431c86acf4edaacb -Author: Prashanth Kumar -Date: Sat Nov 6 07:30:43 2021 -0400 - - stor-547: Fixing crash when taking a backup of CSI and PXD vols - - If backupType is Generic in kdmp-config, use this for all drivers - except pxd to take generic backup - -commit 496fcd87ccdd3dc9b6bef491c1c476b0d9093d09 (origin/stor-2003) -Author: diptianjan -Date: Wed Nov 3 19:20:26 2021 +0530 - - [STOR-551]: pvc creation for kdmp restore should happen in kdmp controller. - -commit f865cb5dc48b400fd6b15d4a506ff1d0d261944f -Author: siva-portworx -Date: Fri Nov 5 05:24:20 2021 -0400 - - vendor kdmp chnages from master branch - -commit f496102140c3ee3172bdd769d1efa8f4e8e1a3a5 -Author: siva-portworx -Date: Thu Nov 4 23:20:44 2021 -0400 - - vendor changes for openstorage repo from release-9.1 branch. - -commit 34afda6d8f143e7aafbac71fc175ddd8a7591b32 (origin/stor-545-vendor-issue) -Author: siva-portworx -Date: Tue Nov 2 05:23:36 2021 -0400 - - pb-1997: Added check not to intialize the snapshot class in the DE, if - CSI does not support snapshot or cases where we want to skip CSI and - take generic backup (proxy-volume) - -commit 88ec148875262be67266d259a228dff0bebc7023 -Author: siva-portworx -Date: Fri Oct 29 06:10:58 2021 -0400 - - stor-514: Added checks such that pure FB and vsphere PVC will be - defaulted to kdmp driver for snapshot. 
- -commit 088d8fdeaa377052ced2b24b40c7de60009c9040 -Author: siva-portworx -Date: Wed Nov 3 01:42:31 2021 -0400 - - vendor changes of kdmp from master branch - -commit c5afc04d779bdd4316f13c759de19809c27c1d49 -Author: siva-portworx -Date: Mon Nov 1 07:39:57 2021 -0400 - - pb-1988: Added alreadyExists check for creation of volumeSnapshot and - PVC creation - - - While deleting the snapshot CRs, setting the proper retain - value. For latest snapshot, retain will be set true, so that local - snapshot will be retained. For other snapshots, it will be set - false, so that both the CR and local snapshot files are also - will be deleted - -commit 3eba3cbbe1ebd31a077be0787c977552c2f29203 -Author: siva-portworx -Date: Tue Nov 2 13:59:23 2021 -0400 - - stor-545: Added provisioner and volumesnapshot fields in applicationbackup CR. - -commit de495f217e7c9a7b646ac8cc385e7bdd926548f6 -Author: siva-portworx -Date: Tue Nov 2 13:57:50 2021 -0400 - - stor:545 vendor for kdmp from master branch - -commit ffa64d33f81da4e239a05a59dbb33f58b95377cf (origin/pb-1988) -Author: siva-portworx -Date: Thu Oct 28 07:02:09 2021 -0400 - - vendor latest kdmp from master branch - -commit 6b1ee47a38b7a0f79b3204e0dfa3a6826f7fc814 -Author: Prashanth Kumar -Date: Tue Oct 26 14:07:45 2021 -0400 - - Changed "generic type string to "Generic" - -commit 477a879b25d5220680b8bce278ca617cfd1bd188 (origin/pb-1982-fix, origin/pb-1982-1) -Author: diptianjan -Date: Mon Oct 25 16:49:17 2021 +0530 - - Vendoring latest kdmp changes. - -commit 964e1d8300eb989d9b0045b1b2407cb5313550e7 -Author: diptianjan -Date: Mon Oct 4 20:03:04 2021 +0530 - - [STOR-481]: Uploading CRs in csi generic backup. - Made the required changes as part of snapshotter package. - -commit 6883a2cd0a78dbdae38110a91a7e562593ed2011 -Author: siva-portworx -Date: Sat Oct 23 01:12:02 2021 -0400 - - pb-1981: Adding kdmp CRs to skip during applicationregistration. - -commit 76dec1e5d808b0537e5e7fe39971128a9574a79b (origin/pb-1982) -Author: Ram -Date: Fri Oct 22 14:25:18 2021 +0530 - - STOR-537: Reverse Migration is failing for PX pv objects - - - improve volume bound time - - skip pv update if respective pvcs are skipped - - Signed-off-by: Ram - -commit c9b0d02b75a97e9f92d968e0c9bd0e8c03d6cc51 -Author: siva-portworx -Date: Fri Oct 22 03:55:19 2021 -0400 - - pb-1978: Adding deletionTimestamp check before calling delete for dataexport CR. - -commit ec2e7d3eddf9b9879fa8c07509819030bddb02a9 -Author: Prashanth Kumar -Date: Thu Oct 21 07:31:49 2021 -0400 - - Reading backup type from config map - - User can set all backups to be generic by adding BACKUP_TYPE=generic in kdmp-config map - - This way all backups would be forced to be generic - -commit 89fefe24385bf17e853cf76cd075b01b587b99d2 (origin/pb-1966) -Author: Aditya Dani -Date: Thu Oct 21 14:21:32 2021 -0700 - - Remove wget from Dockerfile and use curl instead - - - Reduces the vulnerabilities reported by DTR - from: - - 15 critical - 58 major - 25 minor - - to: - - 7 critical - 27 major - 22 minor - -commit a5fefcb5309db5da8558b465b40ef006b233a7e2 -Author: Aditya Dani -Date: Thu Oct 7 15:40:26 2021 -0700 - - Modified the decision making process for choosing a driver for a PVC. - - - Create an ordered list of drivers defined in STORK. For every PVC - stork will check these drivers can handle this PVC. If all the drivers - fail the last one in the list - KDMP will pick it up. - - The goal is KDMP should handle all kinds of PVCs for all the different - workflows that stork supports. 
-
-    This also means that for currently non-supported APIs by KDMP driver
-    the respective operation will fail if none of the other drivers support that PVC.
-
-commit da58d384827f250ee6c5fca50e52833253d236ae (origin/pb-1975)
-Author: siva-portworx
-Date:   Tue Oct 19 07:49:01 2021 -0400
-
-    stor-529: Calling cleanupResources as part of finalizer handlers in applicationbackup controller code
-
-commit 4504d85f717aeedaa895741c517517d2445ca643
-Author: Prashanth Kumar
-Date:   Wed Oct 20 00:30:27 2021 -0400
-
-    vendor update for kdmp
-
-commit 79a787fa7fac963177a7cf7c00b96a6fb19b7f2a
-Author: Ram
-Date:   Mon Oct 18 23:30:41 2021 +0530
-
-    Remove stork-scheduler version update from integration test script
-
-    Signed-off-by: Ram
-
-commit 09f413387529b9395bbc12eed41715610a2f2272
-Author: Ram
-Date:   Thu Oct 14 19:24:32 2021 +0530
-
-    update migration status for each pv
-
-    Signed-off-by: Ram
-
-commit b34b23c22405e93bb9b8bbd3d34727c3a5db6c9f
-Author: Ram
-Date:   Thu Oct 14 14:22:18 2021 +0530
-
-    STOR-530: Unable to take backup on GKE 1.21 cluster
-
-    fix deprecated zone label for pv
-
-    Signed-off-by: Ram
-
-commit c73c5b896fece5123977158b54a9b0bf0133205f
-Author: Prashanth Kumar
-Date:   Mon Oct 18 09:59:32 2021 -0400
-
-    vendor update for kdmp
-
-commit 8dfb41df7cd9df0b30a558b6feeb4cba821e56f1 (origin/pb-1962)
-Author: siva-portworx
-Date:   Tue Oct 12 15:41:54 2021 -0400
-
-    vendor changes for kdmp from master branch
-
-commit 55ebe4dca9bce5543d636e459315cfe9d8c5c220
-Author: siva-portworx
-Date:   Mon Oct 11 14:23:55 2021 -0400
-
-    pb-1802: Added steps to create the kdmp-config cm with default values.
-
-    - Initially creating the kdmp-config config map with the default
-      values need for kdmp job rate limit values and container
-      resource limit values as well.
-    - Also added check to include Final stage in the failure check
-      of kdmp driver.
-
-commit 5f7672036ecea70c0f0c18236f846a6b68dda51b
-Author: Aditya Dani
-Date:   Tue Oct 12 13:01:44 2021 -0700
-
-    STOR-526: Use python 3.9.2 in stork image.
-
-    - Python 3.x until 3.9.1 have a security vulnerability
-      CVE-2021-3177
-    - Updated the python version in the container to 3.9.2
-
-commit dfae08b0e45a434756f2e90f52a7f741b7634b4a
-Author: Aditya Dani
-Date:   Tue Oct 12 10:49:24 2021 -0700
-
-    STOR-527: Add nil check in GetPodVolumes portworx implementation.
-
-    - A PVC object can be empty if the PersistentVolume is directly
-      provided in the pod spec.
-
-commit 2d69baba429571c28e4ce6a3fca6d51889726e27 (origin/pb-1802-1)
-Author: Prashanth Kumar
-Date:   Fri Oct 8 16:04:28 2021 -0400
-
-    stor-522 Fetch backup and restore size or data mover
-
-commit 943077c0288ba0aa44d6d933d43c97546c03fc34
-Author: Aditya Dani
-Date:   Sun Oct 10 08:44:05 2021 -0700
-
-    STOR-398-v2: Add plural forms as short names for VolumeSnapshots and VolumeSnapshotDatas
-
-commit 09ff758c99ddf882f6877e6f80dbfac163a29dbe
-Author: Aditya Dani
-Date:   Fri Oct 1 14:47:47 2021 -0700
-
-    vendor update from libopenstorage/openstorage
-
-commit 53e732e0cbc709cd06383824144fb35f09cb08aa
-Author: Aditya Dani
-Date:   Thu Sep 30 17:31:36 2021 -0700
-
-    Portworx: Parse the storage class mapping while restoring a volume.
-
-    This change allows backing up a Portworx volume of a particular storage class
-    and properties, and restore that volume with a different storage class.
-    For ex. Backup a volume with HALevel=2 and restore it with HALevel=1
-
-    - Parse the storage class mapping from restore spec and find if a mapping
-      for the current PVC's storage class is found.
-    - If found, fetch the actual contents of storage class and parse them into
-      a RestoreVolumeSpec.
-    - Invoke the CloudBackupRestore API with this RestoreVolumeSpec.
-
-commit ae7f793dcb5ced8f6c784fecf05271b8877d1a8a
-Author: Ram
-Date:   Tue Oct 5 16:15:06 2021 +0530
-
-    ptx-1404: nit msg for storkctl activate migration on crds
-
-    Signed-off-by: Ram
-
-commit 32732ed798324413c228de8c2c88ac28a8bb4fcd
-Author: Ram
-Date:   Tue Oct 5 16:14:43 2021 +0530
-
-    stor-520: handle nil entry for ns labels while restore
-
-    Signed-off-by: Ram
-
-commit 21f38b24e9b2391ec8bb19c52789dd31b1cc9801
-Author: Prashanth Kumar
-Date:   Wed Oct 6 22:14:40 2021 -0400
-
-    vendor update for kdmp - fixed job pending issue
-
-commit 28160efc05cbcfbd6d97550ec14193a6886571f0
-Author: Ram
-Date:   Tue Oct 5 15:53:56 2021 +0530
-
-    stor-509: Update pvc uid mapping while migrating pv objects
-
-    CSI PVC does not go into bound state on DR cluster if pv claimref has
-    invalid pvc uid
-
-    Signed-off-by: Ram
-
-commit 95179147a087d3ce386e4fbd224de1df998d8c59
-Author: Prashanth Kumar
-Date:   Wed Oct 6 01:15:09 2021 -0400
-
-    vendor update for kdmp
-
-commit c670be7925d2c605a199c849695d86fae48cc77b (origin/pb-1856)
-Author: Aditya Dani
-Date:   Mon Oct 4 14:26:18 2021 -0700
-
-    KDMP: Before starting a restore check if destination PVC is in Bound state.
-
-    - Check for both pvc.Spec.VolumeName is not empty and the PVC status is in Bound state.
-
-commit bcf7e2bfd81f36fc9c8abdb403fe6c27bef1093a
-Author: Aditya Dani
-Date:   Mon Oct 4 09:25:16 2021 -0700
-
-    Portworx: Check for auth params in in-tree storage class definition as well.
-
-commit f7b00bd26bb32c6ede8b707b74e9499eebbe4a04
-Author: siva-portworx
-Date:   Mon Oct 4 10:06:15 2021 -0400
-
-    pb-1941: remove the fix of changing the status to Inprogress for call cleanupResources.
-
-commit a59d39773a96e6a40f544f5db73d3be6f95be887
-Author: siva-portworx
-Date:   Mon Oct 4 09:26:54 2021 -0400
-
-    vendor changes for kdmp repo master
-
-commit 264b68e6821ea71cb8ecb31a1e5491c07b56d51c
-Author: siva-portworx
-Date:   Sat Oct 2 11:24:13 2021 -0400
-
-    pb-1941,pb-1944: modified the generic backup resource name to be match
-    new naming format as given below
-
-    - name format ---
-
-    Calling cleanup function in Final case is creating issue as
-    px-backup will delete the CR as soon the stork updates the CR
-    status to success or failure and stage as Final. Because of that
-    application backup CR is getting deleted before dataexport CR.
-
-    Now calling cleanup before moving the status of final stage
-    and success.
-
-commit 7e90929602d8adf5b26bb6e09a1f1510134f0045
-Author: siva-portworx
-Date:   Fri Oct 1 16:13:50 2021 -0400
-
-    vendor changes for kdmp repo.
-
-commit 2c7dd9dff943380dbf39e3ce990de513e711caa1
-Author: siva-portworx
-Date:   Thu Sep 30 13:33:24 2021 -0400
-
-    pb-1941: added pvc UID to be part of volumeInfo in ApplicationBackup and ApplicationRestore CR definition
-
-commit 5251ee38dcae0eeddf25d9293cb4378f76435749
-Author: Aditya Dani
-Date:   Sat Sep 25 08:25:19 2021 -0700
-
-    vendor updates from kdmp
-
-commit 67dbf03ec05d2c43aedff60f173cdf1a0b17daf6
-Author: Aditya Dani
-Date:   Fri Sep 24 09:34:24 2021 -0700
-
-    Add support for handling snapshots in KDMP driver.
-
-    - Pass the snapshot storage class in the DataExport CR when provided
-      in the ApplicationBackup CR.
-
-commit f67574376f8814491755a77fb48ea0ab10dbd223
-Author: Aditya Dani
-Date:   Mon Sep 27 17:04:00 2021 -0700
-
-    STOR-398: Add short names for VolumeSnapshot and VolumeSnapshotData CRs.
-
-    - vendor update from libopenstorage/external-storage
-
-commit 7803c7adc1c39c1e4434b2500a945c9cce816cea (origin/pb-1916-1)
-Author: Ram
-Date:   Wed Sep 29 10:55:24 2021 +0530
-
-    [kdmp] remove postfix special chars while labelling crs
-
-    - address review comments
-
-    Signed-off-by: Ram
-
-commit fca8c6636db9db23345c1148fcdb24638c914e64
-Author: Ram
-Date:   Tue Sep 28 20:52:32 2021 +0530
-
-    Fix issue of multi-ns backup and restore for kdmp driver
-
-    stor-512: restore completed as partialsuccess for generic backup
-
-    Signed-off-by: Ram
-
-commit 41bbb1103ccaf60ddc249f91c6ce6cee9f35dc2b
-Author: Ram
-Date:   Tue Sep 28 12:57:37 2021 +0530
-
-    Add CleanupResource api for backup/restore
-
-    Signed-off-by: Ram
-
-commit e59eac2d59151994889f5d1ce9cb6442035fcbd4
-Author: Ram
-Date:   Mon Sep 27 10:56:24 2021 +0530
-
-    Cleanup DataExport CR after backup/restore
-
-    Signed-off-by: Ram
-
-commit 4ae2c86c8db63740114b972862dac0fc7fa2f653
-Author: Prashanth Kumar
-Date:   Tue Sep 28 03:16:13 2021 -0400
-
-    vendor update for kdmp for ssl enable/disable
-
-commit 326cd5363a2a2da9bf83c36de0527849e15b3fb4
-Author: siva-portworx
-Date:   Tue Sep 28 01:58:06 2021 -0400
-
-    stor-510: Not classifying proxy-volume as pxd, even if protworx provisioner annotation is set
-
-commit 8aba1590582a3f9695602e7b67eacb4a333bea2b
-Author: Ram
-Date:   Fri Sep 24 22:51:25 2021 +0530
-
-    Vendor updates sched-ops
-
-    Signed-off-by: Ram
-
-commit df8c2cf49691750255fd2528b613dd143df1d5d6
-Author: Ram
-Date:   Fri Sep 24 22:50:58 2021 +0530
-
-    Update webhook configuration for v1.22+ k8s
-
-    Signed-off-by: Ram
-
-commit 5649d7905f279dbb6de84f3d4c874abe62cbaec8
-Author: siva-portworx
-Date:   Mon Sep 27 13:11:03 2021 -0400
-
-    stor-506: Added worked around to delete the dataexport CR at the end, when all the snapshots are completed.
-
-commit 004803657a35ef8784279b7a60be061e9aff11c1
-Author: siva-portworx
-Date:   Sat Sep 25 17:09:57 2021 -0400
-
-    pb-1908: truncating the CR name and pvc names BY 63 char, while adding it as label in the dataExport CR
-
-commit 81341fbebede49e2c87ab114a2eba1df81a1267a
-Author: siva-portworx
-Date:   Sat Sep 25 16:33:58 2021 -0400
-
-    vendor changes for truncate pkg
-
-commit 68d023ca0411fcf1430dfed3cd6c568d3f57fd0c
-Author: siva-portworx
-Date:   Fri Sep 24 14:14:26 2021 -0400
-
-    pb-1908: Added backup and restore CR name and pvc name in the dataexport
-    CR.
-
-commit 11a01171d12169d7c4b9473fe9c2a70c3fcb91d1
-Author: Prashanth Kumar
-Date:   Mon Sep 20 13:44:40 2021 -0400
-
-    Handling kdmp in-progress job cancellation and deletion based on RetentionPolicy
-
-commit ba2e13b85471aa80b965d70d774cbb93a6dd1007
-Author: Prashanth Kumar
-Date:   Sun Sep 19 21:47:36 2021 -0400
-
-    vendor udpate for kdmp
-
-commit 97089f8df6351ee79232d83580492f7c11b28572
-Author: Aditya Dani
-Date:   Tue Sep 7 17:26:42 2021 -0700
-
-    Add a Snapshotter interface.
-
-    - The snapshotter interface provides APIs for snapshots and restores.
-    - Extracted out the snapshotting functionality from the CSI driver and added it
-      to the snapshotter interface
-
-    The goal is to separate out the snapshot functionality from the actual drivers so
-    that different components in stork can use it.
-
-commit 3d3c55483ac9eac974fd51d53cec64911c0b6682
-Author: Aditya Dani
-Date:   Tue Sep 21 17:48:16 2021 -0700
-
-    STOR-425: Do not silently fail cluster domain update commands for invalid domain.
-
-    - Check if the input domain is part of the existing cluster domains. If it is not
-      fail the activate/deactivate command.
-
-    Signed-off-by: Aditya Dani
-
-commit 324b5fbca9199057acf1ac88e6b5647b1f1ed12d
-Author: Aditya Dani
-Date:   Tue Sep 21 16:45:59 2021 -0700
-
-    STOR-483: Skip filtering for volumes which do not have DataNodes populated.
-
-    - Certain storage providers like Portworx proxy volumes or Portworx direct access volumes
-      do not have the volumeInfo.DataNodes populated. Do not filter out the request for
-      such volumes.
-
-commit d6c398ec818b224cf2872be82cf684c0ec94ff25
-Author: Aditya Dani
-Date:   Mon Sep 13 15:58:05 2021 -0700
-
-    [Portworx Driver] Add support for PX-Security enabled clusters.
-
-    - Add support for backing and restoring PVCs which are provisioned
-      on a PX-Security enabled cluster.
-
-    Backup:
-    - CSI Volumes: Use the token secret name/namespace specified
-      as a part of NodePublish secrets
-    - In-tree Volumes: Use the token secret name/namespace provided as annotations
-    - Save the secret name and namespaces as a part of backup.VolumeInfo.Options
-      so that they can be used while restoring.
-    - If a templatized namespace is provided, then store ${pvc.namespace} as a part
-      of backup.VolumeInfo.Options so that on restore the token secret in the namespace where
-      restore is being done is used.
-
-    Restore:
-    - Use the token secret name and namespace provided as a part of backup options.
-    - If a templatized namespace is found use the secret in the namespace where the
-      PVC is being restored.
-
-commit 416cc7f6bb67e6dfbc7d798e2e6edc150dd22db0
-Author: Ram
-Date:   Mon Sep 20 17:03:24 2021 +0530
-
-    STOR-444: Update FinishTimestamp for failed applicationbackup
-
-    stor-448: update labels & annot of namespace based on replace policy
-    stor-443: update activate/deactivate message for crds
-
-    Signed-off-by: Ram
-
-commit 2f877c0c4f539d892e0ad6809ba38a0d4c0a5f79 (origin/master-siva-kdmp)
-Author: Ram
-Date:   Wed Sep 22 16:29:08 2021 +0530
-
-    csidriverfix: handle completed backups
-
-    Signed-off-by: Ram
-
-commit be7a00a6e6a32853c2d20e581ffb41f46deed480
-Author: Rohit-PX
-Date:   Tue Sep 21 17:58:55 2021 -0700
-
-    Add auth annotations to migrations triggered by migrationschedules
-
-    Signed-off-by: Rohit-PX
-
-commit 46d39c7ba17439ac098c8c2140e74c89ffb1f90f
-Author: Aditya Dani
-Date:   Tue Sep 21 18:04:13 2021 -0700
-
-    STOR-499: ApplicationRestores fail if there is a mix of CSI PVCs and other storage providers.
-
-    - Skip the volumes which are not CSI PVCs in CSI driver restore path.
-
-commit 95b5446437ef4264a4c4761961b4263eac1d1d6a
-Author: Aditya Dani
-Date:   Fri Sep 17 12:44:30 2021 -0700
-
-    STOR-459: CSI: Skip restore of PVCs if driver is not CSI
-
-commit 238047cfa43fc2bbb9523daa14c9b3acc86ed834 (origin/master-temp-siva)
-Author: Ram
-Date:   Wed Sep 15 22:54:48 2021 +0530
-
-    vendor updates- kdmp
-
-    Signed-off-by: Ram
-
-commit 0bba70b9c361c9c0537e391489b9bf761084d8bc
-Author: Ram
-Date:   Wed Sep 15 22:55:23 2021 +0530
-
-    stor-455: Generic restore support
-
-    Signed-off-by: Ram
-
-commit 40e5288a9f38f5433a86789e577c310b0c8eb91c
-Author: Ram
-Date:   Thu Sep 16 19:32:23 2021 +0530
-
-    stor-485: add completed backup volume to backup list
-
-    Signed-off-by: Ram
-
-commit 15fea41a173d59d1a0046288e2f910a1ada74471
-Author: Ram
-Date:   Wed Sep 8 19:25:29 2021 +0530
-
-    Set KDMP custom images to latest tag
-
-    Signed-off-by: Ram
-
-commit 57f38c06d0be33c6eddc6db6668362d4aa916537
-Author: Ram
-Date:   Wed Sep 8 11:16:01 2021 +0530
-
-    vendor updates
-
-    Signed-off-by: Ram
-
-commit b3519171190bae929abc05cbacab8c46e3a3ce22
-Author: Ram
-Date:   Tue Sep 7 23:43:44 2021 +0530
-
-    start kdmp controller from stork
-
-    - implemented GetBackupStatus() for kdmp
-    - implemented DeleteBackup() for kdmp
-    - support generic backup type
-
-    Signed-off-by: Ram
-
-commit f3d706348018e98171982e167d09f250a2893280
-Author: Ram
-Date:   Tue Sep 7 23:41:22 2021 +0530
-
-    vendor update schedops,kdmp
-
-    Signed-off-by: Ram
-
-commit 2eb4df7710e38e4539a80b3e63a0850b69ecf611
-Author: Ram
-Date:   Mon Sep 6 22:06:14 2021 +0530
-
-    stor-462: detect pvcs to be collected for kdmp driver
-
-    Signed-off-by: Ram
-
-commit 1cc475a3d99eb80bd33cc6241b7dca60ccd534b8
-Author: Ram
-Date:   Mon Sep 6 22:05:19 2021 +0530
-
-    stor-463: vendor updates KDMP
-
-    Signed-off-by: Ram
-
-commit 62cec2a70e06d26382aa36378bf39525cb98fc68
-Author: Ram
-Date:   Wed Sep 8 15:41:43 2021 +0530
-
-    allow list of crd via v1beta1 apis
-
-    - stork tries to list crds via v1 apis which is not supported
-      for older k8s versions
-
-    Signed-off-by: Ram
-
-commit 5a1f8a4142bf301efe146d065684e72c6f07a019
-Author: Rohit-PX
-Date:   Thu Sep 2 10:50:12 2021 -0700
-
-    Add new parameter for kube-scheduler version
-
-    Signed-off-by: Rohit-PX
-
-commit a33dbd5084a7288e991946006c7c3082df2ba8c4
-Author: siva-portworx
-Date:   Thu Sep 2 10:11:18 2021 -0400
-
-    stor:468 Changed the field name GenericBackupRepoKey to RepositoryPassword in backuplocation CR
-
-commit e3ed875d73038e495dc45c2de1882bfbf8b9f068
-Author: siva-portworx
-Date:   Wed Sep 1 04:32:47 2021 -0400
-
-    stor-467: Added genericBackupRepoKey field in backuplocation CR for
-    storing generic backup repo password.
-
-commit c2b5a3de3b0e25bb393079323d9aef0f92e00f05 (tag: v1.4.0)
-Author: Rohit-PX
-Date:   Thu Aug 26 15:11:21 2021 -0700
-
-    Make version check optional in integration tests
-
-    Signed-off-by: Rohit-PX
-
-commit dbf03a3e170f6888124d71d9c43bbc984ec0527d
-Author: Rohit-PX
-Date:   Wed Aug 25 11:42:59 2021 -0700
-
-    New test for volume snapshot restore with vdbench data
-
-    Signed-off-by: Rohit-PX
-
-commit 4ffc964e861ca7c1fba432d2e65e8d48ad448813
-Author: Ram
-Date:   Mon Aug 23 23:56:48 2021 +0530
-
-    PB-1363: Unable to take csi + px volume backups
-
-    Signed-off-by: Ram
-
-commit 2a2a656c01729580f2a0666072898d750c103189
-Author: Aditya Dani
-Date:   Tue Aug 24 22:30:37 2021 -0700
-
-    Added migration failover and failback tests.
-
-    - The existing migration tests setup one way cluster pairs between two clusters
-      however the migrations also happen only in one direction. Once the migration
-      is complete the resources are deleted and a reverse migration is triggered.
-
-    This does not execute the scenario of failover and failback of the same
-    application and volume between the two clusters.
-
-    Currently the test is only added for portworx driver. Using portworx encryted volumes with mysql statefulsets.
-
-commit 044b271694cafee5501da227a19bb4457f6e354b
-Author: Ram
-Date:   Wed Aug 25 11:03:39 2021 +0530
-
-    check destination svc annoatation before assigning keys
-
-    Signed-off-by: Ram
-
-commit 24078363cc2475142226d76c989f91ae6ff41e9c
-Author: Ram
-Date:   Mon Aug 23 23:20:11 2021 +0530
-
-    Correct applicationbackup schema spec for BackupType
-
-    Signed-off-by: Ram
-
-commit fa823b8b7139e0aff62c4675ab9fe5e1c1f2176b
-Author: Ram
-Date:   Mon Aug 16 20:18:56 2021 +0530
-
-    vendor updates
-
-    Signed-off-by: Ram
-
-commit f5abbcfc2627d89615649df9e427d0a45cb4a440
-Author: Ram
-Date:   Mon Aug 16 20:18:36 2021 +0530
-
-    support v1 crd based on k8s version
-
-    Signed-off-by: Ram
-
-commit 670dcdf1439f167e0e61922dccb3d5b8e9139b8f
-Author: Aditya Dani
-Date:   Wed Jul 28 14:38:18 2021 -0700
-
-    Fix the ClusterRoleBinding in prometheus specs.
-
-commit 4da3fdd765809e43ba29f2d39821aa46f97e6fbb
-Author: siva-portworx
-Date:   Thu Aug 19 04:08:35 2021 -0400
-
-    stor-458: Added following changes in stork CRD for data mover feature.
-
-    - Added backupType in ApplicationBackup and ApplicationBackupSchedule
-      CR definitions.
-    - Added storageClass in ApplicationBackupVolumeInfo definition.
-
-commit 748124ffafcbcbb67b87cae27cf0f3c8bb311e43
-Author: Aditya Dani
-Date:   Mon Aug 16 14:08:49 2021 -0700
-
-    Do not force delete pods in VolumeSnapshotRestore.
-
-    - Force deleting the pods immediately deletes the pod objects from k8s etcd,
-      which gives a false indication to stork that the pods have been deleted. The
-      subsequent restore call fails since the underlying PVC is not yet detached by k8s.
-
-    Signed-off-by: Aditya Dani
-
-commit e0416ca19d6f2e05f050389cc643378e67468eb0
-Author: Ram Suradkar
-Date:   Tue Aug 10 17:14:44 2021 +0000
-
-    move skipservicemodify check
-
-    Signed-off-by: Ram Suradkar
-
-commit 7511262ac7ed7a5ffc7372c8659f9be71ab698fc
-Author: Ram Suradkar
-Date:   Tue Aug 10 14:55:01 2021 +0000
-
-    [portworx] wait for driver to come online in case of in-place restore
-
-    Signed-off-by: Ram Suradkar
-
-commit 6c1007168818a47db2f2a6ccd9ff9fbc1a2542f0
-Author: Ram
-Date:   Tue Aug 3 16:41:59 2021 +0530
-
-    vendor update hashstructure
-
-    Signed-off-by: Ram
-
-commit b6ceadd67b1200f75dbb16c87d3bf7031c0953df
-Author: Ram
-Date:   Tue Aug 3 16:31:05 2021 +0530
-
-    [migration] only update svc if source svc has been changed
-
-    Signed-off-by: Ram
-
-commit f7458002924ff2ddd455d6f536459a30d142ffa8
-Author: Ram
-Date:   Thu Jul 29 18:58:47 2021 +0530
-
-    handle nil check for networkpolicy ipblock
-
-    Signed-off-by: Ram
-
-commit b3358f1952fc1c3e5719ae66fd8f32cb65c4da51
-Author: Aditya Dani
-Date:   Fri Jul 23 23:50:06 2021 -0700
-
-    STOR-441: Migrations are deleting the backing PX volume.
-
-    With the recent changes for updating the PVC size as a part of Migrations we are now doing the following
-
-    - Update the reclaim policy on the PV to Retain
-    - Delete the PVC
-    - Recreate the PVC and PV
-
-    However due to a bug the Retain policy was not set and the volume was getting deleted.
-
-commit 2bdef2f941a58f96e29f6844c2ffbd699f2f41e0
-Author: Ram
-Date:   Thu Jul 22 22:58:43 2021 +0530
-
-    Fix integration tests intervalScheduleCleanupTest
-
-    Signed-off-by: Ram
-
-commit f3707a86a1257bdc09f67ea08b6ecf25e2458260
-Author: Ram
-Date:   Wed Jul 21 23:14:19 2021 +0530
-
-    stor-415: remove svc ports while cloning services
-
-    Signed-off-by: Ram
-
-commit 3e15faa6803788dbe45894c16c801b3ae17a6974
-Author: Ram
-Date:   Tue Jul 20 18:30:09 2021 +0530
-
-    stor-435: Migration failing, pvc already exists
-
-    Signed-off-by: Ram
-
-commit d15d8f63d2e0a68ece8df1a9e7d2ddcf39e62156
-Author: Ram Suradkar
-Date:   Wed Jul 21 14:28:10 2021 +0000
-
-    stor-439: Backup respenctive pv object for pvc resourcetypes
-
-    Signed-off-by: Ram Suradkar
-
-commit 470afc7e3bf8bd698ea002c99bb4fa22e76d2b63
-Author: siva-portworx
-Date:   Wed Jul 21 11:12:40 2021 -0400
-
-    Added extra check such we skip volume backup only ResourcType is not empty and does not contain PVC in it
-
-commit 708689004a4503ca602e5f596be2c58545f853b4
-Author: Ram
-Date:   Fri Jul 16 23:44:32 2021 +0530
-
-    Omit checking sha for stork version checks
-
-    Signed-off-by: Ram
-
-commit 5a50585661c03ff438c56eed7b2db6c9e2179585
-Author: Ram
-Date:   Wed Jul 14 18:37:02 2021 +0530
-
-    stor-432: storkctl does not activate/deactivate mongodb CR's
-
-    Signed-off-by: Ram
-
-commit fd83b8a6e053f2bbee101dfc7495a76d34224544
-Author: Ram
-Date:   Wed Jul 14 18:36:47 2021 +0530
-
-    use pluralizer rules to collect CRD
-
-    Signed-off-by: Ram
-
-commit 0922aafd843f0e8befbaa0257728e1eb67f4507e
-Author: Ram Suradkar
-Date:   Sun Jul 11 06:46:23 2021 +0000
-
-    Removed incorrect return statement in the backupVolumes function
-
-commit d828f3b24c082ce3721d63a54abed3e2cfba393c
-Author: Aditya Dani
-Date:   Fri Jul 9 11:02:13 2021 -0700
-
-    Add a nil check in volume snapshot schedule metrics reporting.
-
-commit adcbc5df0dea78b0621b1a9535910dfe3e836327
-Author: Aditya Dani
-Date:   Thu Jul 8 17:10:55 2021 -0700
-
-    STOR-393: Add a SAN to the webhook certificate created by stork.
-
-    - Delete the old k8s secret which had the invalid cert and create a new one
-      on startup.
-
-commit 590fd09760ecbc3db988fffe86ba6d081539b055
-Author: Ram
-Date:   Thu Jul 8 18:44:21 2021 +0530
-
-    Update service account resource instead of delete/create
-
-    Signed-off-by: Ram
-
-commit cfacfbf7ddcc8898aa426455d8cee68ff935e742
-Author: Ram
-Date:   Wed Jul 7 17:19:10 2021 +0530
-
-    integration-test to verify stork version
-
-    Signed-off-by: Ram
-
-commit d044b4f0ca6a489280da3eebabc18524c542b4bf
-Author: Ram
-Date:   Wed Jul 7 17:12:16 2021 +0530
-
-    stor-423: add mongodb cr backup/migration support
-
-    Signed-off-by: Ram
-
-commit b965cf79065ec2d4aa20c4ebc46aee55a144132a
-Author: Aditya Dani
-Date:   Wed Jul 7 11:00:13 2021 -0700
-
-    Fix the issues caused by git merge conflicts.
-
-commit cfc0a9c0dcd6a9dccea0f8593d8ad0c621211350
-Author: Aditya Dani
-Date:   Fri Jun 25 08:39:04 2021 -0700
-
-    Handle exponential backoffs for ApplicationRestores.
-
-commit f50b1ef595dc556833a41a8982aa7278bde2236a
-Author: Aditya Dani
-Date:   Thu Jun 10 07:45:16 2021 -0700
-
-    Do not fail migration/backups when the storage provider returns ErrStorageProviderBusy
-
-    - Handle AWS and Portworx providers.
-    - If the storage provider returns a known "BUSY" error the driver wraps it
-      and returns to the controller.
-    - If the controller sees this error, instead of marking the backup as failed it will retry
-      in the next reconcile loop.
-
-commit cc11e976bc0d0b43e969d08bb380a714c197bb7c
-Author: Aditya Dani
-Date:   Thu Jun 3 16:22:16 2021 -0700
-
-    Update openstorage vendor to release-9.1
-
-commit f9d222a92ec86869ef5e31240ff0ba5f56fe0927
-Author: Ram
-Date:   Thu Jul 1 18:28:23 2021 +0530
-
-    stor-419: store stork version in configmap
-
-    Signed-off-by: Ram
-
-commit e3fd22331e6d980232ec7afeaf4502507a702e7f
-Author: Ram
-Date:   Thu Jul 1 16:21:43 2021 +0530
-
-    PB-1679: cluster-scope resource collection
-
-    Signed-off-by: Ram
-
-commit dcac8089359ae81fc40c4e9293454f4b4b5e777b
-Author: Rohit-PX
-Date:   Wed May 26 01:25:54 2021 -0700
-
-    Integration test for creating reverse clusterpair using storkctl
-
-    Signed-off-by: Rohit-PX
-
-commit 3e2efa5ce47c5b7fd8817b3643cbeb3a989a2606
-Author: Ram
-Date:   Fri Jun 25 18:37:53 2021 +0530
-
-    Allow setting k8s client api rate limiter
-
-    Signed-off-by: Ram
-
-commit 72271f246855c2421e84a75d8fdb3782cc2f3691
-Author: Prashanth Kumar
-Date:   Fri Jun 25 04:45:36 2021 -0400
-
-    Backing up selected resource and all resoucres of given namespace
-
-    User can choose to backup all resources in one namespace and selected resources
-    in another namespace.
-
-    Signed-off-by: Prashanth Kumar
-
-commit d983afae5f3ddb731193b6648e156ce766ba94d1 (origin/master_stor-409)
-Author: Prashanth Kumar
-Date:   Thu May 27 08:44:29 2021 -0400
-
-    Reading rule CR from kube-system ns when multiple ns are selected for backup
-
-    Signed-off-by: Prashanth Kumar
-
-commit 2e5808966ce91feecfa5558d477ea05f6526469c
-Author: Ram
-Date:   Tue Jun 8 22:15:00 2021 +0530
-
-    stor-397: merge annotation for SA during restore/migration
-
-    Signed-off-by: Ram
-
-commit 9833cbac3ed5bd431bed21a8c976a3dd2f97dc72
-Author: Ram Suradkar
-Date:   Tue Jun 8 14:03:46 2021 +0000
-
-    codegen auto-generated files
-
-    Signed-off-by: Ram Suradkar
-
-commit ce2b1f3a82bdb95838c967e8c6bc196551aa7eed
-Author: Ram Suradkar
-Date:   Tue Jun 8 14:02:57 2021 +0000
-
-    stor-262: add user option to skip service resource updates
-
-    Signed-off-by: Ram Suradkar
-
-commit 4884d373602a85c5c92db097328ccdad95813f86
-Author: Ram
-Date:   Mon Jun 21 17:04:01 2021 +0530
-
-    stor-411: prevent invalid cr updates
-
-    Signed-off-by: Ram
-
-commit 68cd7af7b795f0e092129386d2f0b4d366add5a2
-Author: Ram
-Date:   Mon Jun 21 17:01:03 2021 +0530
-
-    Dont collect service accounts for user with no list permission in namespace
-
-    Signed-off-by: Ram
-
-commit a57a188287caa8e4dcb6e4f7a2f3a49c4683ee94
-Author: Jim Ou
-Date:   Mon Jun 14 16:26:14 2021 -0600
-
-    [STOR-383] add metrics for volume snapshot schedule
-
-commit 75fcb2c975ff858d83b4102cd022f75cb8b298d5
-Author: Prashanth Kumar
-Date:   Fri Jun 4 07:56:41 2021 -0400
-
-    Backing up NetworkPolicy and PodDisruptionBudget objects
-
-    NetworkPolicy with CIDR set are not backed up
-
-    Signed-off-by: Prashanth Kumar
-
-commit e04bd349f164fde6ca79eeb5f4d030e3168fe13a
-Author: Aditya Dani
-Date:   Fri Jun 11 11:49:48 2021 -0700
-
-    Add a new annotation to skip PVCs from stork's scheduler scoring algorithm.
-
-    - Use the stork.libopenstorage.org/skip-scheduler-scoring: true annotation on PVCs
-      which should not be considered while scoring nodes in scheduler's prioritize request.
-
-    Signed-off-by: Aditya Dani
-
-commit d41e52c3055717fd90828b16af13c1eb72de309d
-Author: Prashanth Kumar
-Date:   Mon Jun 7 07:33:55 2021 -0400
-
-    Skipping backing up of gke-resource-quotas
-
-    On GKE when a namespace is created by default this gets
-    created so no need of backing it up
-
-commit 41fd4bb8122fc9d9d2ffb730c9f34e6c158c5a51 (origin/Jim)
-Author: Ram
-Date:   Mon Jun 7 14:06:40 2021 +0530
-
-    fix namespaced schedule policy cache
-
-    - use seperate cache watch listner for namespaced policy cache
-      store
-
-    Signed-off-by: Ram
-
-commit cf4837a92f72ef00a1ca2f647b79802175b80b8e
-Author: Ram
-Date:   Mon May 24 20:12:36 2021 +0530
-
-    Update latest pvc specs while doing migration
-
-    Signed-off-by: Ram
-
-commit f16f61ba373c03aa930232c37954a82ee410a915
-Author: Jose Rivera
-Date:   Fri May 28 19:24:15 2021 -0700
-
-    Yum has vulnerabilty issues, no need to install and use that. Just use microdnf.
-
-    Signed-off-by: Jose Rivera
-
-commit f93a58d5cb17e3ffe62cd13187cf39e2078c2018
-Author: Ram
-Date:   Tue May 25 18:52:15 2021 +0530
-
-    Return err check while updating cloned namespace
-
-    - fix issue stor-391: application clone fails if dest namespace
-      already exists
-
-    Signed-off-by: Ram
-
-commit 6a04cfb8034630c87556664951a58910744f0335
-Author: Dinesh Israni
-Date:   Sun May 2 18:27:48 2021 -0700
-
-    Update schedule package to use namespaced schedule policy
-
-    If the namespaced policy doesn't exist it looks for the cluster scoped
-    policy with the same name.
-    Also updated all controllers to pass in namespace
-
-    Signed-off-by: Dinesh Israni
-
-commit 49d22d604a33f6fbfac260bd7224433561515c28
-Author: Dinesh Israni
-Date:   Sat May 1 13:15:33 2021 -0700
-
-    Add CRD for namespace scoped schedule policy
-
-    Signed-off-by: Dinesh Israni
-
-commit f4ae1e890236a3355a5d475722baa020bccd9baa
-Author: Dinesh Israni
-Date:   Sat May 1 13:15:04 2021 -0700
-
-    Fix code gen for crds
-
-    Signed-off-by: Dinesh Israni
-
-commit c7e2ddb6c30e3439cfb0670524939ce958cb0198
-Author: Dinesh Israni
-Date:   Sat May 1 13:13:15 2021 -0700
-
-    Vendor update
-
-    Signed-off-by: Dinesh Israni
-
-commit 5a5403beeb5483b24f1501a02d9902ab80e465a3
-Author: Ram
-Date:   Fri May 7 15:52:40 2021 +0530
-
-    Configure rsync time for application controllers
-
-    - avoid sdk update if backup is already on final stage
-
-    Signed-off-by: Ram
-
-commit 23e047a46a877f3c4b02d87dce02e1ed24379eba
-Author: Rohit-PX
-Date:   Wed Apr 28 17:34:44 2021 -0700
-
-    Add scaled cloudsnapshot test
-
-    Signed-off-by: Rohit-PX
-
-commit 6b98cc805c5fe13cb6363c75b9de9382bde49177
-Author: Rohit-PX
-Date:   Thu Apr 15 11:52:28 2021 -0700
-
-    Update the ubuntu repo in travis yaml file
-
-    Signed-off-by: Rohit-PX
-
-commit 467fbbf7c0dfc19ba34e16c31ee83898ee32a7f2
-Author: Rohit-PX
-Date:   Thu Apr 15 10:40:09 2021 -0700
-
-    Add permissions for stork-scheduler for k8s 1.21
-
-    Signed-off-by: Rohit-PX
-
-commit 362c554a2eed8df8599b65d7943264118fda0efc
-Author: Ram
-Date:   Thu Apr 8 12:20:49 2021 +0530
-
-    Update grafana dashboard for changed metrics name
-
-    Signed-off-by: Ram
-
-commit c9a91c3eb342a719d4e1f470259a016795f51727
-Author: Rohit-PX
-Date:   Mon Apr 5 18:10:25 2021 -0700
-
-    Vendor updates for torp and schedops
-
-    Signed-off-by: Rohit-PX
-
-commit 4ea4aa09fac700aa5d3b0d532a752919e15d5323
-Author: Ram
-Date:   Thu Apr 1 23:10:40 2021 +0530
-
-    Add pluralizer rules for prometheus crd
-
-    Signed-off-by: Ram
-
-commit 30fe03cc1e3360d2448782da6b5f8d5e62077dd5
-Author: Ram
-Date:   Tue Mar 30 22:09:48 2021 +0530
-
-    rename stork prometheus metrics with standard prefix
-
-    Signed-off-by: Ram
-
-commit 241d285656f649e5efd5c1c9dd6ae53a4640e221
-Author: Ram
-Date:   Thu Mar 25 21:59:46 2021 +0530
-
-    Vendor updates for torpedo,sched-ops
-
-    Signed-off-by: Ram
-
-commit ecb036fcdacd49de18ce4643c21085a9289b3d91
-Author: Ram
-Date:   Thu Mar 25 21:59:11 2021 +0530
-
-    Integration test for webhook allow dryRun options
-
-    Signed-off-by: Ram
-
-commit 0e759fe18fdca84ae875bb37f01f7cc65ad7162f
-Author: Rohit-PX
-Date:   Mon Mar 22 14:42:14 2021 -0700
-
-    Clusterpair failure tests
-
-    Signed-off-by: Rohit-PX
-
-commit 84fd96942e7e580aca453d5c881edb19e5a574d4
-Author: Ram
-Date:   Mon Mar 22 09:49:06 2021 +0530
-
-    Add appreg suspend support for CR's
-
-    - perconadb
-    - prometheus
-    - rabbitmq
-    - kafka (strimzi)
-    - postgress(acid.zalan.do)
-
-    Signed-off-by: Ram
-
-commit 34b4295cddfb3f50a4e90d2632d0e5d12bdc3870
-Author: Ram
-Date:   Fri Mar 19 09:14:49 2021 +0530
-
-    prometheus alerts for stork metrics
-
-    Signed-off-by: Ram
-
-commit 309caf861575c27344a573a1199e8f4cd05ba437
-Author: Grant Griffiths
-Date:   Tue Mar 2 20:21:56 2021 -0800
-
-    Fix API breaking changes and makefile
-
-    Signed-off-by: Grant Griffiths
-
-    Modify the UTs to adhere to new k8s 1.20 client-go fake client.
-
-    Signed-off-by: Aditya Dani
-
-commit 5e50c870b93868dcb8c0bdc8ff355bcc8f5d22cd
-Author: Grant Griffiths
-Date:   Tue Mar 2 20:21:32 2021 -0800
-
-    Go modules and k8s 1.20 client upgrade
-
-    Signed-off-by: Grant Griffiths
-
-commit 6a6555e31aed9a8d57c24ce46cc62334b402dd90
-Author: Ram
-Date:   Thu Feb 18 23:31:58 2021 +0530
-
-    Detect newly registered crd and create appreg entry in stork
-
-    Signed-off-by: Ram
-
-commit 625843e0adaaf0d862f2d17a542922ac6fcfd4f5
-Author: Ram
-Date:   Mon Feb 1 11:32:22 2021 +0530
-
-    stor340: Redo cluster-pairing automatically
-
-    Signed-off-by: Ram
-
-commit 9161d8c105bf53a8320134e09706ee04c36de271
-Author: Rohit-PX
-Date:   Thu Mar 18 12:38:06 2021 -0700
-
-    Add correct CLI option description for test-deploy
-
-    Signed-off-by: Rohit-PX
-
-commit 016d319198f857e6221bd6e3bd2131b726a86657
-Author: Ram
-Date:   Mon Mar 15 16:54:19 2021 +0530
-
-    configure side-effect parameter for stork webhook
-
-    Signed-off-by: Ram
-
-commit f4dec0e2027d9715022845068f3a67a4c757cc7a
-Author: Rohit-PX
-Date:   Wed Jan 6 22:41:27 2021 -0800
-
-    Add separate CLI params for test, source and dest kubeconfig
-
-    Signed-off-by: Rohit-PX
-
-commit 24958a4afe4340577f729d5fdfcd898769ed27e1
-Author: siva-portworx
-Date:   Wed Mar 10 13:04:48 2021 -0500
-
-    Added logic to exclude the controlled resources from GetResources API.
-
-    - Define a list for excluding controlled resources being returned
-      as part of response of resourceCollector GetResources API.
-    - For now added configmap kube-root-ca.crt, which was getting created by
-      kube-controller-manager in every namespace.
-
-commit 23c03ff8b8e7093d8f786db933921c7cb3599908
-Author: Prashanth Kumar
-Date:   Wed Mar 10 03:17:22 2021 -0500
-
-    Adding support for fetching resources based on requested resource types
-
-    Signed-off-by: Prashanth Kumar
-
-commit 3f9d85b6a2bdc01c6571ffb16cf87c895b9e27c2
-Author: Rohit-PX
-Date:   Mon Mar 15 12:31:31 2021 -0700
-
-    Add sec annotations to create mig method, check for auth in tests
-
-    Signed-off-by: Rohit-PX
-
-commit a4a9149efb22a87292a9f268c5195004b6bf9234
-Author: siva-portworx
-Date:   Thu Mar 11 05:13:08 2021 -0500
-
-    Updating clusterIps field of service to nil in prepareServiceResourceForCollection.
-
-commit e30034fe41997cf2c67a87842c9db4ad0d0aa606
-Author: Ram
-Date:   Mon Jan 11 23:52:59 2021 +0530
-
-    vendor updates sched-ops
-
-    Signed-off-by: Ram
-
-commit 5237b72f534ed106ff7cb652304398f186ffa776
-Author: Ram
-Date:   Mon Jan 11 23:52:34 2021 +0530
-
-    addeds UTs for migration/backup schedule metrics
-
-    Signed-off-by: Ram
-
-commit dcd3673d9fd0d25b7c0ac8786722ad59c6a63815
-Author: Ram
-Date:   Thu Jan 7 13:29:23 2021 +0530
-
-    stor-331: add metrics support for backup/migration schedules
-
-    Signed-off-by: Ram
-
-commit c6aa72f7275a855380b9cddc448fd038ab22eb34
-Author: Ram
-Date:   Tue Mar 2 23:39:45 2021 +0530
-
-    Use python3 version to install libs for google-sdk
-
-    Signed-off-by: Ram
-
-commit 810667298b0bd7e4622563cf5eea11d733ecf8e5
-Author: Ram
-Date:   Fri Dec 18 23:57:08 2020 +0530
-
-    add grafana dashboard for migration and application controllers
-
-    Signed-off-by: Ram
-
-commit df631f75442a9bbbefb7808afee9b60f993eea1b
-Author: Ram
-Date:   Mon Nov 30 23:27:57 2020 +0530
-
-    Add Readme for stork metrics setup instructions
-
-    Signed-off-by: Ram
-
-commit a83d476388327abb6cc156111baf5ef4f32c9493
-Author: Dinesh Israni
-Date:   Fri Feb 12 07:29:57 2021 -0800
-
-    Get ApplicationRegistrations only once when preparing resources
-
-commit a97d89752b7417f34fdebb444f2807bc408c25cf
-Author: sivakumar subramani
-Date:   Wed Feb 3 02:58:10 2021 -0500
-
-    Reduce the backupVolumeBatchCount value to 3 from 10, to avoid timeout failures
-    Added BACKUP-VOLUME-BATCH-COUNT env and made default batch count to 3
-
-commit c378eb4f0f0ad7e7417787e98837dff3cb9d143f (origin/2.6.0)
-Author: Dinesh Israni
-Date:   Tue Jan 26 21:28:20 2021 -0800
-
-    storkctl: Check for error state of snapshot
-
-commit 771dbcf6355d16b2f0c515afdec96a892fb6df5c
-Author: Dinesh Israni
-Date:   Tue Jan 26 21:27:59 2021 -0800
-
-    Vendor update for snapshotter
-
-commit 999b4fe57514ffebbd509426d8a0b793ca07fb41
-Author: Ram Suradkar
-Date:   Mon Jan 4 16:06:06 2021 +0000
-
-    add support for replicaset backup/migration
-
-    Signed-off-by: Ram Suradkar
-
-commit b59f19a4fd701fc4fa849e1563de2d6a2594cc64
-Author: Ram Suradkar
-Date:   Fri Jan 22 10:35:59 2021 +0000
-
-    Create pvc objects if volumeOnly migration is enabled
-
-    Signed-off-by: Ram Suradkar
-
-commit 7f434b6459066a997ba0ce112c782f4e8f058847
-Author: sivakumar subramani
-Date:   Fri Jan 22 14:31:29 2021 -0500
-
-    Corrected the error handling, while calling CloudBackupCreate api.
-
-commit 62471255a1950f0e030d96dc3ab729202bb8ac79
-Author: Prashanth Kumar
-Date:   Thu Jan 21 12:00:25 2021 -0500
-
-    [Portworx]: Setting backup size to zero on failure when fetching size
-
-commit e25223e916556a5d268a5f5fbe69e1f6c173ab3b
-Author: sivakumar subramani
-Date:   Thu Jan 21 13:52:33 2021 -0500
-
-    Fixed the issue in updating volumeInfo, when all volumes are successful.
-
-commit 13500a25d0594facc6040b825d0b8b7033a78387
-Author: Rohit-PX
-Date:   Tue Jan 19 18:19:37 2021 -0800
-
-    Explicitly add linux as the GOOS
-
-    Signed-off-by: Rohit-PX
-
-commit eab938d15257c9c8e2c0a063107ac393fcb56692
-Author: sivakumar subramani
-Date:   Tue Jan 12 11:04:50 2021 -0500
-
-    Passing namespace list in batches to resourceCollector.GetResources()
-
-commit e8f89f1bdccc1c40b2da55520e148f697922bbb8
-Author: Dinesh Israni
-Date:   Sun Dec 20 14:17:31 2020 -0600
-
-    Use path from updated backup when checking for deletion
-
-commit 1248b13b365b56c43cd7c204dc12315ee471e0e6
-Author: Ram
-Date:   Sun Dec 20 14:38:34 2020 +0530
-
-    vendor update apiextension fake client
-
-    Signed-off-by: Ram
-
-commit 44130387281274e46f0972dea9f313c05f3d626c
-Author: Ram
-Date:   Sun Dec 20 13:49:17 2020 +0530
-
-    wait for crds to register while starting metrics collection
-
-    Signed-off-by: Ram
-
-commit e50f35b1d1d331969bfcc7e7c443e7fb66b3b3f0
-Author: Dinesh Israni
-Date:   Fri Dec 18 20:14:24 2020 -0600
-
-    Don't check for namespace mapping again when finding objects to delete
-
-    Namespace is already being mapped to the destination
-
-commit 26f1105e639f60d0cabf9d6a601df507215ef3ef
-Author: Ram
-Date:   Fri Dec 18 19:06:11 2020 +0530
-
-    retry v1 crd registration while doing app restore
-
-    - migration: err check crd reg before validating crds
-
-    Signed-off-by: Ram
-
-commit 9359272b8963f4011887d4e31935f7bbd6bc9cf6
-Author: Ram
-Date:   Tue Dec 15 20:44:44 2020 +0530
-
-    vendor updates sched-ops
-
-    Signed-off-by: Ram
-
-commit e43523786e8d4eca46024fc974c5b4bf1687203c
-Author: Ram
-Date:   Tue Dec 15 19:33:22 2020 +0530
-
-    Use apiextensionV1 api to register CRDs objects
-
-    - store replica count for crd in annotation
-    - use replica count for crd to disable/enable CR resources
-    - apply crds as per apiversion for migration
-    - for v1beta1 to v1 conversion set x-kubernetes-preserve-field to true
-
-    Signed-off-by: Ram
-
-commit 65ef486daaec48740c2c1808c791401d64f42977
-Author: sivakumar subramani
-Date:   Thu Dec 17 07:37:05 2020 -0500
-
-    Added retry logic, while updating the ApplicationBackupStageApplications status to CR.
-
-    If the CR update fails, we retry for 10 times with 10 sec delay.
-
-commit 86bb1aeffed41cc49eeb31290e96498c922c5947 (origin/master-stork-pb1006)
-Author: Grant Griffiths
-Date:   Mon Dec 14 14:54:34 2020 -0800
-
-    [CSI] Add creation of default VolumeSnapshot classes
-
-    Signed-off-by: Grant Griffiths
-
-commit 2abeb00f8fda83faa410b949aefe060d440bd0d0 (origin/master-stor-319)
-Author: Ram
-Date:   Tue Dec 15 12:27:59 2020 +0530
-
-    Misc fix - avoid unnecessary export of metrics variables
-
-    - addressed review comments
-
-    Signed-off-by: Ram
-
-commit f9aa45605f36483075bd1de7a8e8e141dd564630
-Author: Ram
-Date:   Tue Dec 15 12:13:53 2020 +0530
-
-    UT's for stork prometheus metrics
-
-    Signed-off-by: Ram
-
-commit f5080570034c1cf10a622223ac050b8298e33602
-Author: Ram
-Date:   Tue Dec 8 19:27:49 2020 +0530
-
-    vendor updates sched-ops
-
-    Signed-off-by: Ram
-
-commit 9e6f90c25474e3e0b5bd9ed95475e09572dc4c9c
-Author: Ram
-Date:   Tue Dec 8 19:26:59 2020 +0530
-
-    prometheus metrics for stork controllers
-
-    - applicationbackup, restore and clone
-    - clusterpair, migrations
-
-    Signed-off-by: Ram
-
-commit 85fd554ba204a6122c287b1e6cf2e5e4ef0b7fe6
-Author: sivakumar subramani
-Date:   Fri Nov 20 10:38:28 2020 -0500
-
-    Added retry logic for CloudBackupCreate failure.
-
-commit 057cf6eddca9f744e3cbe8d80c00aa7f860b1c0c
-Author: Grant Griffiths
-Date:   Thu Dec 3 17:32:08 2020 -0800
-
-    [CSI] Remove cleanup checks for GetBackupStatus
-
-    Signed-off-by: Grant Griffiths
-
-commit fe97def95ced938b34f64b2335610ea263e13c04
-Author: Grant Griffiths
-Date:   Thu Dec 3 23:11:40 2020 -0800
-
-    [CSI] Add CSI PV and PVCs back into restore resources
-
-    Signed-off-by: Grant Griffiths
-
-commit 6cc44cc09147c696e92c0f80f6810f2c73d02bb0
-Author: sivakumar subramani
-Date:   Tue Dec 1 13:31:15 2020 -0500
-
-    Applying IncludeResource filter before deleting resources for CSI driver
-
-commit 6ec732a6982020fa3463cae399b5b43d9369e0b0
-Author: sivakumar subramani
-Date:   Mon Nov 30 08:39:58 2020 -0500
-
-    Avoid including non-csi volume ad part of csi's GetBackupStatus API.
-
-commit f4170587fb75644fd06a777efd8d48a3fb6ce40e
-Author: sivakumar subramani
-Date:   Fri Nov 27 02:32:39 2020 -0500
-
-    Added retry logic, when CR updates fails for Inprogress status.
-
-    - Also return VolumenInfo list in the case of failure in GetBackupStatus
-      implementation in csi driver.
-
-commit 17590efcd701adf12bc24ece11bf518143990859
-Author: Dinesh Israni
-Date:   Tue Nov 24 22:34:41 2020 -0600
-
-    [CSI] Make the last part of the backup name unique
-
-commit 7ed4cb22e74b37ddcdf374fc5a36faf64bd9e349
-Author: Grant Griffiths
-Date:   Thu Nov 19 22:53:20 2020 -0800
-
-    [CSI] Fix multi-namespace backups
-
-    Signed-off-by: Grant Griffiths
-
-commit ad3167ab2f03abd518d6b0462f485d0b1e74062c
-Author: Grant Griffiths
-Date:   Tue Nov 17 10:26:29 2020 -0800
-
-    [CSI] Restore cleanup without backup obj and various fixes
-
-    - Restore cleanup and cancel no longer depends on backup obj being
-      present.
-    - Use PVC size for backup/restore info sizes
-    - Fix VolumeSnapshot skip check to not look at group
-
-    Signed-off-by: Grant Griffiths
-
-commit 2bfd876b181f530f3ff989089b16622e06ab4a09
-Author: Grant Griffiths
-Date:   Fri Nov 13 21:26:15 2020 -0800
-
-    [CSI] Add support for Replace Policy
-
-    Signed-off-by: Grant Griffiths
-
-commit fa169aa4df8846006cad9ee3c6563fe41e3b1190
-Author: Ram
-Date:   Mon Oct 26 12:04:15 2020 +0530
-
-    vendor updates - sched-ops
-
-    Signed-off-by: Ram
-
-commit 184358e90a046cf81b0108adb5e4d97204befece
-Author: Ram
-Date:   Mon Oct 26 12:02:40 2020 +0530
-
-    allow disabling cronjob support for migration
-
-    - add storkctl option to activate/deactivate cronjobs
-
-    Signed-off-by: Ram
-
-commit a8160b590191e06b3fa6d11f31055bc731719fd6
-Author: Grant Griffiths
-Date:   Fri Nov 13 17:37:49 2020 -0800
-
-    [CSI] Namespace mapping, skip VS obj in includeObject, and UID fix
-
-    Signed-off-by: Grant Griffiths
-
-commit 26fa22e1220f34fc3e1732cf6c18a14ef84ac15a
-Author: Rohit-PX
-Date:   Thu Nov 12 18:28:47 2020 -0800
-
-    Vendor changes for torpedo
-
-    Signed-off-by: Rohit-PX
-
-commit 5be37c9e6b59c5b7d9f8412fbb10ad471adadd48
-Author: Rohit-PX
-Date:   Tue Nov 3 12:51:03 2020 -0800
-
-    Add new param for config map name for generic csi drivers
-
-    Signed-off-by: Rohit-PX
-
-commit e4cb462a5fffd36956c26bb8acb392aef2fe135c
-Author: Prashanth Kumar
-Date:   Tue Oct 27 18:25:58 2020 +0000
-
-    Reporting volume size in bytes for EKS
-
-commit d8a220614e1111679d4ee074d1922ffc9d7aff7c
-Author: Dinesh Israni
-Date:   Thu Nov 12 13:26:40 2020 -0800
-
-    Prune migrations even for PartialSuccess
-
-    Some resources might always fail to be migrated, this causes the status for all
-    PartialSuccess migrations to be saved. Only need to keep the last one in this
-    case
-
-commit b798b8a785754906efb3cda7fce64d3633c047b2
-Author: Grant Griffiths
-Date:   Thu Nov 12 13:30:03 2020 -0800
-
-    [CSI] Fix CSI Backup to correctly cleanup on failures
-
-    Signed-off-by: Grant Griffiths
-
-commit 60fe6a774b34b146ced0c42a658c1a7d89105c0c
-Author: Grant Griffiths
-Date:   Tue Nov 10 17:25:58 2020 -0800
-
-    [CSI] Make VolumeSnapshot objects unique per backup request
-
-    Signed-off-by: Grant Griffiths
-
-commit 7f5bc87677c668cb7906d5492e7252ff92ef9d92
-Author: Dinesh Israni
-Date:   Wed Nov 11 13:46:25 2020 -0800
-
-    Use client from resource collector when checking for PVC ownership
-
-    Without this, if the resource collector is called for a remote cluster it tries
-    to fetch objects from the local cluster
-
-commit 38d537860e07ba14bf1c033ad250a2f0c1cc46d7
-Author: Grant Griffiths
-Date:   Tue Nov 10 03:05:30 2020 -0800
-
-    [CSI] Add check to make sure VolumeSnapshot and VSC are cleaned up
-
-    Signed-off-by: Grant Griffiths
-
-commit 52f8f1ff2bce1fec83e3701b21dd2ef062ea1248
-Author: Grant Griffiths
-Date:   Tue Nov 10 01:49:57 2020 -0800
-
-    [CSI] Only remove bind-completed and bound-by-controller annotations
-
-    - On CSI restore, we used to remove all kubernetes.io annotations
-    - We must only remove bind-completed and bound-by-controller as there
-      are other valid kubernetes.io annotations
-
-    Signed-off-by: Grant Griffiths
-
-commit 302d84567bc648555cb65d148effe35ed27b49a0
-Author: sivakumar subramani
-Date:   Wed Oct 28 23:22:57 2020 -0400
-
-    Added log level change step in SIGUSR1 singal handler of dbg pkg.
-
-commit 263a748f2e2020410cea6875f43c276bff29fe7b
-Author: Grant Griffiths
-Date:   Wed Oct 28 17:19:34 2020 -0700
-
-    Don't skip VolumeName update on CSI driver check failure
-
-    Signed-off-by: Grant Griffiths
-
-commit 45b8e5bb284ba103c7d02e3276a47f50d899b07d
-Author: Dinesh Israni
-Date:   Tue Oct 27 10:24:56 2020 -0700
-
-    Don't collect cluster scoped resources from ApplicationRegistration
-
-commit ad607f0fe8d64def47ba3e284cca45fd99c9e387
-Author: Dinesh Israni
-Date:   Tue Oct 27 10:14:39 2020 -0700
-
-    Fetch ApplicationRegistration only once when preparing resources
-
-commit 4df3ef56e90025b567e911d22989487a28dcb7e5 (origin/stor-292)
-Author: Rohit-PX
-Date:   Thu Sep 10 18:53:31 2020 -0700
-
-    Add security annotations to CRDs for auth-runs
-
-    Signed-off-by: Rohit-PX
-
-commit acbcc4d6f412159be248d8dbc3d0d7a36f98743a
-Author: Grant Griffiths
-Date:   Thu Oct 15 13:03:25 2020 -0700
-
-    [CSI] Add restoreInfo for initial restore and during status check
-
-    Signed-off-by: Grant Griffiths
-
-commit 82b35eade174bac1ef6d9f9e5e814bab519bb35e
-Author: Ram
-Date:   Thu Sep 24 23:24:39 2020 +0530
-
-    add UT's for stork extender and health monitor metrics
-
-    Signed-off-by: Ram
-
-commit 96c79ea053c8011fd24b352c865133caa7e6da9d
-Author: Ram
-Date:   Thu Sep 24 23:22:44 2020 +0530
-
-    re-org metrics constant to respective pkgs
-
-    Signed-off-by: Ram
-
-commit 947a6d8f23c106585647f253014345bb3583a0c9
-Author: Ram
-Date:   Thu Sep 24 23:21:36 2020 +0530
-
-    vendor updates prom testutil pkg
-
-    Signed-off-by: Ram
-
-commit ec5afdb15404076e3a146dfd53157ae4c6f8a054
-Author: Ram
-Date:   Wed Sep 2 23:39:40 2020 +0530
-
-    Specs to enable promethous metrics for stork
-
-    Signed-off-by: Ram
-
-commit 453be91054eff710cb29af4fb1ee42fc0e3262ed
-Author: Ram
-Date:   Wed Sep 2 23:37:47 2020 +0530
-
-    Prometheus metrics for stork extender and monitor
-
-    - added prometheus metrics for stork extenders which
-      covers hyper,non and semi-hyperconverged pod counter
-    - added stork monitor metrics for no of pod rescheduled
-      by stork monitor
-
-    Signed-off-by: Ram
-
-commit 7428dcbdbde0aae8fdc94f671a9f32ad6c14648c
-Author: Grant Griffiths
-Date:   Tue Oct 13 10:18:47 2020 -0700
-
-    Fix for non-CSI PVC restore not erroring out
-
-    Signed-off-by: Grant Griffiths
-
-commit 9b16a998e32b64c72efdf40cf7d8c9b8411894c8
-Author: sivakumar subramani
-Date:   Sat Aug 29 11:39:56 2020 -0400
-
-    Submiiting the volume StartBackup in a batch count of ten.
-
-    - This way, we will update the volume backup status
-      frequently to the CR content.
-
-commit 6254ad1390988ae2f183ec8f1e85d8cb071c3761
-Author: Grant Griffiths
-Date:   Mon Jul 20 22:13:20 2020 -0700
-
-    Add CSI driver snapshot implementation
-
-    Signed-off-by: Grant Griffiths
-
-commit aa955bb559b2befc6bbbe17881bcdad09545c1cc
-Author: Grant Griffiths
-Date:   Mon Jul 20 22:12:25 2020 -0700
-
-    Add external-snapshotter to vendor for snapshotter client
-
-    Signed-off-by: Grant Griffiths
-
-commit eb6c180439ad1136182e9b3f1477a63ca6bc9e0e
-Author: Ram
-Date:   Thu Sep 10 21:53:37 2020 +0530
-
-    vendor updates for openstorage-7.0
-
-    Signed-off-by: Ram
-
-commit 717c7e9e2deaa2ecdad7d57b2428fe5521ba3497
-Author: Ram
-Date:   Thu Sep 10 21:51:59 2020 +0530
-
-    [portworx] Allow passing delete local snapshot flag to CloudSnapCreate api
-
-    Signed-off-by: Ram
-
-commit 372aaa37b267008f09f08dd605bed192de5d20cd
-Author: Ram
-Date:   Thu Sep 3 12:56:30 2020 +0530
-
-    Check for clusterdomains status only if sync-dr is configured
-
-    Signed-off-by: Ram
-
-commit 84e4083dc39bf40b6c99f786240725bdfc908cdb
-Author: Ram
-Date:   Wed Sep 16 10:46:38 2020 +0530
-
-    Update go version to 1.13.1 for integration test container
-
-    Signed-off-by: Ram
-
-commit 7ac0941b63a97e00c4be777f7b0e0b116975e00c
-Author: sivakumar subramani
-Date:   Thu Sep 10 05:56:34 2020 -0400
-
-    Applying label selector for clusterRole, clusterRoleBinding and ServiceAccount resources.
-
-commit 1c2360c11cfe5029d00884c4cded3483b432e2d6
-Author: Ram
-Date:   Mon Sep 14 23:09:53 2020 +0530
-
-    UT's to test suspend/resume of multiple migrSchedules
-
-    Signed-off-by: Ram
-
-commit 17295a28a23695781b028b88b2db21459d295001
-Author: Ram
-Date:   Mon Sep 14 22:51:35 2020 +0530
-
-    Fix multiple suspend/resume migrSchedule issue
-
-    Signed-off-by: Ram
-
-commit 26af754f4f441db6582f143757d5a89386a1c83c
-Author: Ram
-Date:   Fri Sep 11 22:16:52 2020 +0530
-
-    Wait for resource deletion before re-creating during migration
-
-    Signed-off-by: Ram
-
-commit 76504464515e9c9f95c59d8d4897e9baeaf9ad5c
-Author: Dinesh Israni
-Date:   Fri Sep 11 00:12:21 2020 -0500
-
-    Check for permission error when processing objects in resource collector
-
-commit a168522973f7d76fdb6b84b66ab86350e4b660a1 (origin/master-stork810)
-Author: Luis Pabón
-Date:   Wed Sep 9 16:09:40 2020 -0400
-
-    Revert "Add authentication labels to volume"
-
-    This reverts commit ab2ab4a00d358f1302ea9ba548f1b4866c6d86c9.
-
-commit ab2ab4a00d358f1302ea9ba548f1b4866c6d86c9
-Author: Luis Pabón
-Date:   Tue Sep 8 21:00:39 2020 -0700
-
-    Add authentication labels to volume
-
-    If a request is authenticated, then the resulting volumes must have
-    the labels pointing to the authentication information
-
-    Signed-off-by: Luis Pabón
-
-commit 84fb515ef5275afc9a9453065c4a1405558a2a7a
-Author: Dinesh Israni
-Date:   Tue Sep 8 12:18:15 2020 -0500
-
-    Skip types in resource collector for forbidden errors
-
-commit 849b951f6a1ea3efee12193d6f2f2ebdc26f0374
-Author: sivakumar subramani
-Date:   Wed Sep 2 09:08:07 2020 -0400
-
-    Continuing with next namespace creation, if current ns already exists.
-
-commit 8b66f7b32f20e7c102ac0e7a6487d2d8f4b3d331
-Author: Rohit-PX
-Date:   Mon Aug 31 20:05:54 2020 -0700
-
-    Vendor updates for torpedo/auto-pilot/apimachinery
-
-    Signed-off-by: Rohit-PX
-
-commit cb114b6b67e5c75ac70e5fd9ad4758a44fdf707e
-Author: Rohit-PX
-Date:   Tue Jul 21 22:49:48 2020 -0700
-
-    Use objectstore drivers to validate deletion of app backups
-
-    Signed-off-by: Rohit-PX
-
-commit c0553effb7673a45073005fa6c5e3e9e485479f0
-Author: Dinesh Israni
-Date:   Tue Sep 1 10:05:13 2020 -0700
-
-    Set Group to core fif empty when creating object map
-
-commit 373647b437fb7d9d6f10134ca598b1bd3f044233 (origin/master-stork-263)
-Author: Dinesh Israni
-Date:   Tue Aug 25 16:36:20 2020 -0700
-
-    Add support to specify resources during application backup
-
-commit 915046a60cd113e02af7c8cf8236ceaa4d2928a4
-Author: Ram
-Date:   Fri Aug 21 17:52:59 2020 +0530
-
-    vendor updates sched-ops
-
-    Signed-off-by: Ram
-
-commit ea687f06194a5cb8bb63f542fac4c681a4c40ce8
-Author: Ram
-Date:   Mon Aug 24 22:59:21 2020 +0530
-
-    Add debug logs to webhook server
-
-    Signed-off-by: Ram
-
-commit b773b813de577ab22cddf682ef44a90f9a063af7
-Author: Ram
-Date:   Fri Aug 21 17:51:15 2020 +0530
-
-    Register all crds found at startup
-
-    - create applicationregistration cr for all crds
-      present at k8s server
-
-    Signed-off-by: Ram
-
-commit 04eb8c25862aa40fb532b0c795d9b529cebfca2d
-Author: Ram
-Date:   Fri Aug 21 10:43:31 2020 +0530
-
-    [Portworx] Restrict backuplocation for migration in px < 2.6
-
-    Signed-off-by: Ram
-
-commit f1504ba8bf6771ed732ed3aee3c84540ad762763
-Author: Ram
-Date:   Thu Aug 20 00:00:09 2020 +0530
-
-    create backuplocation on destination cluster if passed as pair option
-
-    Signed-off-by: Ram
-
-commit 931f76d1dd64c38ffa11a6fa6ed9ae2e31775f2c
-Author: Ram
-Date:   Wed Aug 19 23:58:59 2020 +0530
-
-    [portworx] pass in backuplocation as credid to clusterpair create api
-
-    use IoProfileBkupSrc for volume restore
-    Signed-off-by: Ram
-
-commit 267a2d45c45a8035284a0a32e1030da4a5ec23a3
-Author: Ram
-Date:   Wed Aug 19 23:58:16 2020 +0530
-
-    vendor updates - libopenstorage/openstorage
-
-    Signed-off-by: Ram
-
-commit 6c183fbe824476cb552c9b617f1378036b35409f (tag: v2.5.0-rc1)
-Author: Prashanth Kumar
-Date:   Mon Aug 17 07:48:49 2020 +0000
-
-    Adding actual backup size in backup volume info
-
-commit 026b75e319a6ce62a37f0c5e1a3a534ef771f5d0
-Author: Prashanth Kumar
-Date:   Sun Aug 16 03:57:16 2020 +0000
-
-    [Azure]: Fix to populate backup size for azure volumes
-
-commit 486be964eb459ef0f861705f38f512c092f1ed08
-Author: Prashanth Kumar
-Date:   Thu Aug 13 16:46:56 2020 +0000
-
-    Passing empty endpoint to AWS objectstore operations
-
-    AWS SDK fetches correct endpoint based on provided region
-
-commit 9137cf544fca3fd772f9c13ee6361ae231dcac19
-Author: siva-portworx
-Date:   Mon Aug 10 12:29:00 2020 +0000
-
-    Added check for Notfound error in GetRestoreStatus of all driver.
-
-    - In all driver module except portworx, add check in get volume
-      status during restore to mark the restore of the volume, if we
-      NotFound error, while querying volume status.
-
-commit 843d3b6f920cdbe545438b4e22fb5a7573d16d4e
-Author: Prashanth Kumar
-Date:   Mon Aug 17 18:21:02 2020 +0000
-
-    Computing total backup size before uploading metadata.json file
-
-commit 1dac4379fc8e18e09500b98f59368c7f0d93c786
-Author: Dinesh Israni
-Date:   Thu Aug 13 20:04:35 2020 -0700
-
-    Don't do namespace check in ApplicationBackup if it is completed
-
-commit db9a159795aaa698138d685c4dc27b92aeaa69ff
-Author: Dinesh Israni
-Date:   Thu Aug 13 20:04:16 2020 -0700
-
-    Only create namespaces that are being restored
-
-commit c1f4dba5f8ca2b034d56b9b235e11d7b0903277f
-Author: Dinesh Israni
-Date:   Thu Aug 13 19:52:34 2020 -0700
-
-    Use core as group when checking for PVC to restore
-
-commit 55a355495cb2098d9961ad552246d51580b3ddb2
-Author: Dinesh Israni
-Date:   Mon Aug 10 21:02:55 2020 -0700
-
-    ResourceCollector: When checking for resources set group name for core
-
-    The group in the resource is empty, need to set it to core before comparing
-
-commit 890250ca53187c614635e6b9d0fb25605a3cf809
-Author: Dinesh Israni
-Date:   Thu Aug 6 16:30:32 2020 -0700
-
-    Update Rule spec to take in container name
-
-    This is required since a pod could have multiple containers and we
-    need to know which one we should run the commands in
-
-commit ddc1313ace02d7bf3d273d8133af5211f9382af2
-Author: Dinesh Israni
-Date:   Thu Aug 6 16:29:58 2020 -0700
-
-    Add support to pass in * to select all namespaces to backup
-
-commit 20d6600800dc043293219c82edf30a3a39c5f74f
-Author: Harsh Desai
-Date:   Sat Aug 8 15:32:26 2020 -0700
-
-    Update copyright to 2020
-
-commit 03645313f300e57d269d019aa9aef1ebd47090dd
-Author: Dinesh Israni
-Date:   Thu Aug 6 22:25:51 2020 -0700
-
-    Add support to collect LimitRange for Migration/ApplicationBackup/Clone
-
-commit 38a3d0e88f11c1957056cd9266a745fedccd08b9
-Author: Ram
-Date:   Tue Aug 4 20:56:43 2020 +0530
-
-    allow string type for appreg suspend option
-
-    Signed-off-by: Ram
-
-commit c560870c6513cf0ae7c010785a3fbc3328929fd1
-Author: Ram
-Date:   Thu Jul 30 21:03:42 2020 +0530
-
-    vendor updates sched-ops
-
-    Signed-off-by: Ram
-
-commit c1f5b284fd6001cd2185b8eb1ff407d2e4fe698c
-Author: Ram
-Date:   Mon Jul 13 16:59:46 2020 +0530
-
-    storkctl ux enhancement
-
-    - accept storage option for clusterpair
-    - add option to allow validating migration spec
-    - add watch option to storkctl stork resources
-    - fix error checks
-
-    Signed-off-by: Ram Suradkar
-
-commit 6bd079c2dbdfcfa6775cf14808a6dfe1b6626720
-Author: Grant Griffiths
-Date:   Thu Jul 30 10:27:44 2020 -0700
-
-    [Portworx] Move jwtIssuer to be configurable env var
-
-    Signed-off-by: Grant Griffiths
-
-commit f8508819999ae828a7297efe5e39a9e738e41c4f
-Author: Grant Griffiths
-Date:   Tue Jul 28 14:04:56 2020 -0700
-
-    [Portworx] Add check for PX version check for jwt issuer
-
-    Signed-off-by: Grant Griffiths
-
-commit c885fcc517746e19e09c2be216517a18532093a9
-Author: Rohit-PX
-Date:   Wed Jul 22 17:40:18 2020 -0700
-
-    Create new bucket for stork integration tests
-
-    Signed-off-by: Rohit-PX
-
-commit a13cf62cb58a033cae5451062f0070a046ea72bc
-Author: Dinesh Israni
-Date:   Thu Jul 23 09:51:32 2020 -0700
-
-    Vendor update for sched-ops
-
-commit 2913e9a4dbbd11c86054f60c20f25e4fbdab38b5
-Author: Ram
-Date:   Thu Jul 23 22:09:56 2020 +0530
-
-    ignore if appregistration cr is not present on cluster
-
-    Signed-off-by: Ram
-
-commit 79beb7b368f2cbdde72f29a95b956b4794363824
-Author: siva-portworx
-Date:   Mon Jul 20 13:09:10 2020 +0000
-
-    Added fix to not to wait for the re-created resources in DeleteResources API.
-
-commit d71a72639040a642c645d35efea87af41d2b9f47
-Author: Ram
-Date:   Thu Jul 23 17:30:56 2020 +0530
-
-    use resourcecollector k8sclient instead of schedop client to list
-    crd resources
-
-    Signed-off-by: Ram
-
-commit d0f437ecf0bc0ab34c81bf189c410e9807657912
-Author: Ram
-Date:   Thu Jul 23 12:58:51 2020 +0530
-
-    vendor updates torpedo,schedops
-
-    Signed-off-by: Ram
-
-commit 9894cc3a0c74ff05b6e50f0a67facc86a21b55cf
-Author: Ram
-Date:   Thu Jul 16 21:55:05 2020 +0530
-
-    update sched-ops createNamespace api
-
-    address review comments
-
-    Signed-off-by: Ram
-
-commit 156ab1456043087e55591a8ea23bab390a9cfeb9
-Author: Ram
-Date:   Wed Jul 8 23:43:17 2020 +0530
-
-    replace namespace metadata if namespace already exists
-
-    Signed-off-by: Ram
-
-commit 6e7c4ceea12ac211d7de9725f3927f9c1127b134
-Author: Ram
-Date:   Wed Jul 8 00:36:03 2020 +0530
-
-    keep namespace metadata for application backup,restore & clone
-
-    Signed-off-by: Ram
-
-commit db41af12556938bb6801dba12618b2e7b6b9d131
-Author: Dinesh Israni
-Date:   Tue Jul 21 17:11:42 2020 -0700
-
-    Add some missing storage classes in integration tests
-
-commit 8a3223c18abdc4ef69acead06180bb04e691b96c
-Author: Dinesh Israni
-Date:   Tue Jul 21 16:50:02 2020 -0700
-
-    Ignore empty pre and post exec rules
-
-commit b59a8963fb5722a45f6df5ca93653d768cec1655
-Author: Ram
-Date:   Mon Jul 20 19:54:33 2020 +0530
-
-    delete crd.json on applicationbackup delete retension
-
-    Signed-off-by: Ram
-
-commit 4f725e7283e1795b7f001dd339b2b511f7945859
-Author: Ram
-Date:   Wed Jul 8 00:36:58 2020 +0530
-
-    handle pre/post exec rule for volumesnapshotschedules
-
-    Signed-off-by: Ram
-
-commit 61017a7f1e81ca5f64dee22fddbb54a3e2a6bae0
-Author: Rohit-PX
-Date:   Thu Jul 16 14:40:08 2020 -0700
-
-    Fix expected backup count for scheduled interval backup tests
-
-    Signed-off-by: Rohit-PX
-
-commit 46e9a68ba9551362f28ced6692fdd0424a013c8a
-Author: Prashanth Kumar
-Date:   Fri Jul 17 13:55:40 2020 +0000
-
-    [Portworx]: Fix updating of volume info for backup size on unsupported porx version
-
-commit 71f3174023222e4f654f2758e821d9db338eaf49
-Author: Dinesh Israni
-Date:   Mon Jul 13 19:35:37 2020 -0700
-
-    Collect ResourceQuota objects from ResourceCollector
-
-commit 014b4a24bd86461a775f9b4af7a91f893f5998cd
-Author: Dinesh Israni
-Date:   Mon Jul 13 19:34:55 2020 -0700
-
-    Update namespace in Role during apply in resource collector
-
-commit 3e71515998dc0aa678e02cd63fb3db0a08714132
-Author: Christoph Böhmwalder
-Date:   Fri Jul 3 10:19:15 2020 +0200
-
-    [LINSTOR] add volume driver for LINSTOR
-
-    Add a new volume driver for LINSTOR, an SDS solution by LINBIT. Also
-    make some changes in order to be able to run the integration tests.
- -commit 20fb0c4133da36eadd1f0ae38096fb1d42f1d676 -Author: Christoph Böhmwalder -Date: Fri Jul 3 10:18:13 2020 +0200 - - vendor changes for LINSTOR driver - - Update dependencies and add golinstor - -commit eeab301f7ded43f74a500199ef82f0a427f8fdc4 -Author: Dinesh Israni -Date: Mon Jul 6 21:17:18 2020 -0700 - - Skip downloading CRDs in ApplicationRestore if not uploaded - -commit d388b7b85e2b8e2897e27804ed009fedc299114d -Author: Ram -Date: Tue Jul 7 21:47:28 2020 +0530 - - disable webhook controller by default - - Signed-off-by: Ram - -commit cb6d155a6c9132654005bb331263b915d3cc56fb -Author: Dinesh Israni -Date: Mon Jul 6 20:14:03 2020 -0700 - - Skip backup of PVCs being deleted or in Pending state - -commit 42acd34471b2493040acb6dede0e8dc60bb2ab2d -Author: Ram Suradkar -Date: Thu Jul 2 11:50:49 2020 +0000 - - fix restore fail issue by removing unneccesary metadata fields - - change app reg names - - correct typo for couchbase appregistration - - Signed-off-by: Ram Suradkar - -commit 176e632c9d392e7e255ef5616de41d48ea1ab78e -Author: Dinesh Israni -Date: Wed Jul 1 15:58:28 2020 -0700 - - [Portworx] Init driver from InspectVolume if required - -commit bc1dbc43d369c21abb1340db5e0c422081bc282b -Author: Rohit-PX -Date: Mon Jun 29 10:06:22 2020 -0700 - - Separate application backups and restores for all tests - - Signed-off-by: Rohit-PX - -commit 39f960a75807bcf2f05c70157f231fb6399b9aa9 -Author: Ram -Date: Wed Jul 1 18:02:22 2020 +0530 - - allow activate/deactivate for migrated crds - - Signed-off-by: Ram - -commit f2d0fb072a1ae6257d343b95a76140db766a819b -Author: Ram -Date: Wed Jul 1 11:55:43 2020 +0530 - - Migrate resource specifc crds only - - fix review comments - - use struct for registering app resources rather than csv parsing - - use group/version/kind while collecting resources - Signed-off-by: Ram - -commit bb59ff5a4a461862d4e4fc6281cb883fcfc70ed9 -Author: Ram -Date: Tue Jun 30 22:47:51 2020 +0530 - - add uts for appreg cli - - Signed-off-by: Ram - -commit a49c641ebbf30486a0b336113fccc9bb70a08798 -Author: Ram -Date: Tue Jun 30 20:54:21 2020 +0530 - - skip migrating crd if app not registered - - fix duplicate app registration - - Signed-off-by: Ram - -commit be71d0de69a12e192db27f692887b4273338e149 -Author: Ram Suradkar -Date: Tue Jun 30 06:16:10 2020 +0000 - - vendor updates for sched-ops - - Signed-off-by: Ram Suradkar - -commit 8464c12f8848a670a60f94444aba6a428f917864 -Author: Ram -Date: Tue Jun 30 11:11:14 2020 +0530 - - Create app registration for already supported CRD's - - add storkctl option for retriving app reg - - Signed-off-by: Ram - -commit e1fc97ab90374bab47c55709f7a37b1b92edb832 -Author: Ram -Date: Fri Jun 19 23:44:43 2020 +0530 - - Support generic CRD migration,backup and restore - - Signed-off-by: Ram - -commit 6d7585c1c220ad3823f60a598cf8a6ba745a1ff6 -Author: Prashanth Kumar -Date: Tue Jun 30 08:54:27 2020 +0000 - - Adding volume size to restore CR - -commit 82133ed72e6bfb9d3a9ca2a02f70a810ab4fcfa6 -Author: Prashanth Kumar -Date: Tue Jun 30 08:52:14 2020 +0000 - - [Portworx]: Driver changes for the following - - Returning backup size zero for older portwrox driver which doesnt - support OSD Size() call - - Adding size to restore volumes - -commit 9255e7a9877535484b77a185b505b514ca6800df -Author: Dinesh Israni -Date: Mon Jun 15 16:00:10 2020 -0700 - - Add support for restoring specific objects - - Users can specify individual objects to restore by specifying them under - spec.includeResources in ApplicationRestore - By default all objects from the backup will be 
restored - -commit 5dfb8543bfb4e735e7de344fa05ea6e0fa6bfdec -Author: Ram -Date: Fri Jun 26 22:14:29 2020 +0530 - - Add applicationregistration CR to register custom CRD for backup, migration - - Signed-off-by: Ram - -commit b89dee5cd5334dcabafca9aec4d6ccc1cb13c4f2 -Author: siva-portworx -Date: Mon Jun 22 11:03:42 2020 +0000 - - corrected the typo in the failure reason. - -commit 04e8e89d92eae87d386e2683aaaea9edda83d0d8 (origin/master_error_fix) -Author: siva-portworx -Date: Thu Jun 18 19:49:33 2020 +0000 - - Removed CR updates that leads to "object has be modified error". - - - In backupVolumes() and backupResources(), remove the - CR updates that can lead to "object has be modified error" - as two updates happens in the same cycle of reconciler. - -commit 32d39271602b0ec7a37512bb3dda4e2eadcc88fa -Author: Dmytro Tsapko -Date: Tue Jun 9 05:10:50 2020 +0300 - - [portworx] authorization for cluster manager calls is added - - Signed-off-by: Dmytro Tsapko - -commit 56967162e4049d4970919842d0e897bedee754d0 -Author: stgleb -Date: Thu Jun 18 01:50:21 2020 +0300 - - STOR-200 Add prepare and verify pods for mysql - -commit a2bc3899256069f7e08df2cd251064f8ca5b9946 -Author: Prashanth Kumar -Date: Sun Jun 14 17:01:26 2020 +0000 - - Added fetching backup size of volume backup - -commit 7ec19c88322e891772b01d7a7357efb2998fedc8 -Author: Prashanth Kumar -Date: Fri Jun 12 15:01:58 2020 +0000 - - vendor update for libopenstorage to support cloud backup size proto - -commit 55bd790a06d16799e9e37f2a454a2d828d4c60de -Author: Ram -Date: Thu Jun 18 19:58:25 2020 +0530 - - Disable auto-updating app scheduler as stork for app, - if disable annotations is applied - - Signed-off-by: Ram - -commit ca65f5006f4241a145d95c55803e3971c963e550 -Author: Dinesh Israni -Date: Mon Jun 15 18:22:46 2020 -0700 - - Add events when deleting pods from health monitor - -commit 9660efb6ea32c5e63d87d31de1c2992a3fa3a3f8 -Author: Dinesh Israni -Date: Mon Jun 15 18:20:42 2020 -0700 - - [Portworx] Map node status None to Online - - During node start the status for all the nodes is set to None, this shouldn't - cause pod deletions from the health monitor - -commit 8b4b217b0ceb771d681230e92a4edf97036bb950 -Author: Rohit-PX -Date: Fri Jun 5 15:43:25 2020 -0700 - - Integration test to check health check fix - - Signed-off-by: Rohit-PX - -commit 2bfd7160ebc5341903f0fab2b939184479b62c1e -Author: Dinesh Israni -Date: Mon Jun 8 16:05:30 2020 -0700 - - Add support to collect couchbase CRs - - Will be used during Migration, ApplicationClone and ApplicationBackup - -commit 23f558043bfbb8f3d8843bd417a4d7114f244ee7 -Author: Dinesh Israni -Date: Thu Jun 4 18:11:22 2020 -0700 - - Fix storkctl error message when suspending backup schedule - -commit 02a8ddb2603c1719dc5a1a4b09d24ab59ca52941 -Author: Dinesh Israni -Date: Thu Jun 4 16:37:54 2020 -0700 - - Fix elapsed time for application backup when it hasn't started - -commit 1636003bf7169c4227a86dddfa74c7d7466caa6e -Author: Dinesh Israni -Date: Thu Jun 4 16:37:42 2020 -0700 - - Bump version to 2.5.0 - -commit 152a07be564fafa79fd4de3fa6ba665650155f5f -Author: Dinesh Israni -Date: Thu Jun 4 14:38:34 2020 -0700 - - [Portworx] Add options to pass in incremental count frequency - -commit 0e091fb57413fbc5d52a9e127a91a8436c0e90e3 -Author: ram-infrac <35250819+ram-infrac@users.noreply.github.com> -Date: Fri Jun 5 10:24:42 2020 +0530 - - Update cmd/stork/stork.go - - Co-authored-by: Dinesh Israni - -commit ff9aab6c2e23f47d29b6d3b2f624db080fe88c7f -Author: Ram -Date: Fri Jun 5 09:55:45 2020 +0530 - - add flag to 
enable/disable webhook controller - - Signed-off-by: Ram - -commit cd05e53933c96d8de63aeaf5d10d7c3c9fbc4427 -Author: Dinesh Israni -Date: Thu Jun 4 14:40:56 2020 -0700 - - Add storageclass to s3 config in BackupLocation - -commit 302dab2f82005aff31268d17a32dfd3fc8de4ca7 -Author: Dinesh Israni -Date: Thu Jun 4 14:39:41 2020 -0700 - - Add -q flag to wget in Dockerfile - -commit fdc70d4eb5a52bf61f2837b99c015c5e5c644661 -Author: Ram -Date: Thu Jun 4 19:40:10 2020 +0530 - - use admission review request namespace to get volume info - - dont proceed if volume owner not found - - Signed-off-by: Ram - -commit 0a5a4afe686e0c1fcc8dd85432e19d292c344642 -Author: Christoph Böhmwalder -Date: Fri May 22 11:58:28 2020 +0200 - - fix stork-scheduler RBAC role for Kubernetes 1.18 - - While trying to deploy stork on a k8s cluster with version 1.18, we - noticed that some permission errors came up when deploying the scheduler. - This fixes the yaml to work for k8s 1.18. - - Signed-off-by: Christoph Böhmwalder - -commit 6343f8025385f5641edfe256d501bd53d3fe6198 -Author: Rohit-PX -Date: Thu May 28 10:15:12 2020 -0700 - - Add apt-get update to deploy script - - Signed-off-by: Rohit-PX - -commit c36af8e0b67efa4168d6efe2fb885b9038684d0e -Author: Dinesh Israni -Date: Wed May 27 12:18:42 2020 -0700 - - [AWS] Use correct volume id for CSI - -commit 61e36d97273c09cb42bffa2e7089bc219ef42181 -Author: Ram -Date: Wed May 27 21:46:39 2020 +0530 - - Create clusterdomainstatus CR at controller init - - Signed-off-by: Ram - -commit 46d7ea8e49b4da891f6f71240dbe8d7cdc8810fa -Author: Dinesh Israni -Date: Thu May 21 17:13:10 2020 -0700 - - [GCP] Parse zone and volume name for CSI PVCs - -commit 558b93ea72cd96edc84fa8f969ba21606fb06989 -Author: Dinesh Israni -Date: Thu May 21 17:12:38 2020 -0700 - - Vendor update for gcp libaries - -commit 8809af2c5fde3ef727c2243c3ab18281d1c8f172 -Author: Dinesh Israni -Date: Wed May 20 17:06:15 2020 -0700 - - Fix check for preparing jobs during restore - -commit 25d93f20fd76bc4143eb41d64629dc341fbe20ca -Author: Christoph Böhmwalder -Date: Fri May 8 10:39:57 2020 +0200 - - specs: remove initializers field from specs - - The metadata.initializers field has been deprecated since Kubernetes - 1.13, and has been completely removed in 1.16. So, when using a - Kubernetes version >=1.16, this errors out while creating the - deployment. - - Fix this by just removing the key in stork-deployment.yaml and - stork-scheduler.yaml. - - Signed-off-by: Christoph Böhmwalder - -commit d39c7e69afc409823034b6d7a6613c1cbe4ff037 -Author: Dinesh Israni -Date: Tue May 19 13:23:58 2020 -0700 - - [GCP] Fix for using correct volume ID for CSI PVCs - -commit 11aa9b09300e1e1f3813318a6c74d76912779e24 -Author: Dinesh Israni -Date: Tue May 19 13:16:56 2020 -0700 - - [AWS] Fix for using correct volume ID for CSI PVCs - -commit df398cd173852859f944d5af8edf2e51d0690c2c -Author: Dinesh Israni -Date: Tue May 19 13:03:14 2020 -0700 - - [Azure] Fix for using correct volume ID for CSI PVCs - -commit cb9f1aed0259bb1e9bd3abcfffbf7c53dcec1fae -Author: Dmytro Tsapko -Date: Thu May 7 02:10:36 2020 +0300 - - [portworx] openstorage was updated - - Signed-off-by: Dmytro Tsapko - -commit 29904ff46b62fb1f10fa6cf9ef9e5f0ce86e4f02 -Author: Dmytro Tsapko -Date: Wed Apr 22 21:23:01 2020 +0300 - - [portworx] TLS related scheduler to driver communication updates. - 1. TLS for Legacy REST API decided to be disabled. - 2. 
possibility of loading CA cert from external file was added - - Signed-off-by: Dmytro Tsapko - -commit c51d7a4d121f6be6a338abb5cde567fa7da09b11 -Author: Rohit-PX -Date: Wed May 13 22:49:06 2020 -0700 - - Vendor changes for security including torpedo, schedops and openstorage - - Signed-off-by: Rohit-PX - -commit 9d288298785275d5c92e184353273f8542b710ae -Author: Rohit-PX -Date: Wed May 13 22:43:44 2020 -0700 - - Changes for running stork with px security - - Signed-off-by: Rohit-PX - -commit cdb859759d74b1956c9fe974bf8998987f91f171 -Author: Dinesh Israni -Date: Wed May 13 14:15:00 2020 -0700 - - [AWS] Add CSI provisioner name check - -commit fbd5a9e681f95f79b98eaf293ce5c45414c3c3fe -Author: Dinesh Israni -Date: Wed May 13 14:12:58 2020 -0700 - - [GCP] Add CSI provisioner name check - -commit 9af530b8399f5f35d6908c086129bf8a0847e21f -Author: Dinesh Israni -Date: Wed May 13 14:10:13 2020 -0700 - - [Azure] Add CSI provisioner name check - -commit 13f7994c13763aaecf9d4e0e52df9c7655cae7f5 -Author: Dinesh Israni -Date: Wed May 13 14:39:34 2020 -0700 - - Update vendor dependencies - -commit 8b0aca75eb40ffca9a6851688ba499ebf08d7ece -Author: Dinesh Israni -Date: Tue Apr 14 16:01:25 2020 -0700 - - Collect jobs from resource collector - - Since jobs might not be idempotent, restoring/cloning/migrating them are - optional. They will always be backed up - -commit e5a35793e40569654cee3537c49d235b7359d1ac -Author: Ram -Date: Thu May 7 10:04:44 2020 -0700 - - change base docker image to redhat-ubi8 - - Signed-off-by: Ram - -commit 7442bd342b3041a0c2d9c088f5656c8700fdf5d1 -Author: Rohit-PX -Date: Mon May 11 12:26:51 2020 -0700 - - Change regex to pick individual focussed test - - Signed-off-by: Rohit-PX - -commit e6ec5554091c817553a6551950de14f55b76ecac -Author: Rohit-PX -Date: Mon Apr 27 15:58:32 2020 -0700 - - Add env variables for aws access to stork-test - - Signed-off-by: Rohit-PX - -commit 3ae156cc74fc2be5d25a00fa2cabaa77683195d1 (origin/master-pb438) -Author: Dinesh Israni -Date: Sat Apr 25 19:48:21 2020 -0700 - - During backup skip PVCs with unsupported drivers - -commit e03885db7dbc97fa8a7464f5a5dba1b04926f79b -Author: Dinesh Israni -Date: Fri Apr 24 20:48:13 2020 -0700 - - Don't block driver registration if init fails - - The init will be tried again when the APIs are called if init had failed - -commit e98e472b0fefbd3062ce4bdd089836a525f51c86 -Author: Ram -Date: Mon Apr 13 10:46:54 2020 +0530 - - vendor update for openstorage - - Signed-off-by: Ram - -commit a6df695bdd20d0b511f83add217e68e888f388b4 -Author: Ram -Date: Mon Apr 13 08:45:13 2020 +0530 - - [portworx] cloudsnap restore api update - - Signed-off-by: Ram - -commit 62e08ba1ab2635f88653a106bd5cc51f091e9edc -Author: Dinesh Israni -Date: Sat Apr 18 23:53:29 2020 -0700 - - Allow drivers to return resources that should be restored before volumes - - This allows the Portworx driver to specify any secrets that it might require to - be created before starting the volume restore. - No-op in the other drivers for now. - -commit f154ad70c80df0f786306b3a05a84f5dcd35fb73 -Author: Dinesh Israni -Date: Sat Apr 18 18:24:47 2020 -0700 - - Ignore error when updating initial state for pre exec rule in backup - - The object could have been modified. 
This allows the reconcile to run again with - the updated object - -commit 76b5f677548811a5a501d228788d55299e5c1ae0 -Author: Dinesh Israni -Date: Sat Apr 18 16:13:40 2020 -0700 - - Skip PV restore if bound to a PVC that shouldn't be restored - -commit 72693b859342d59d90dbac783ce653b3597e555b -Author: Dinesh Israni -Date: Fri Apr 17 23:44:24 2020 -0700 - - Fix pre exec rule failure in application backup - - Multiple updates were failing because of conflicts. Fetching latest object - before updating in pre exec path - Also if the pre exec rule fails the backup will be marked as Failed instead of - retrying - -commit e19b2d5b96ec6f0ae6cbe0c97c9bb38da94dc17f -Author: Ram -Date: Mon Apr 13 20:48:42 2020 +0530 - - update health check UT's for node status backupoff retries - - Signed-off-by: Ram - -commit 2033f1fb65fa9238b56c63763b6cb2b08a149505 -Author: Ram -Date: Thu Apr 9 21:03:24 2020 +0530 - - add backoff to check node status before deleting pods - - make node status check as async routine - - Signed-off-by: Ram - -commit c6bec7b08fc364d91b6cdc747415eecd4ee6b704 -Author: Ram -Date: Thu Apr 9 21:02:52 2020 +0530 - - Add driver node inspect api - - Signed-off-by: Ram - -commit fdb9fa4bec10540e95bcfb47fb60ab6b9dd44221 -Author: Dinesh Israni -Date: Thu Apr 16 18:38:04 2020 -0700 - - During restore skip resources from namespaces that aren't provided in spec - -commit ded9302c7a5f92d2318c37ea4c9121f47a5f41d7 -Author: Dinesh Israni -Date: Mon Apr 13 15:22:37 2020 -0700 - - Create bucket during application backup if possible - -commit 8c65b881689a931d10fda2c6730f773328020598 -Author: Dinesh Israni -Date: Mon Apr 13 15:22:23 2020 -0700 - - Vendor update - -commit dbb605fd07d291039f41d34c20425bb8d7e89793 -Author: Dinesh Israni -Date: Thu Apr 16 22:52:09 2020 -0700 - - [AWS] Retry snaps for internal errors - -commit b58853d44b97129b16172ef39a46654ae82dc3d8 -Author: Dinesh Israni -Date: Thu Apr 16 15:55:18 2020 -0700 - - Return error if not able to fetch snapshot info in schedule controller - -commit c160c90f1df05a3f487467dc631163392a2a2346 -Author: Dinesh Israni -Date: Thu Apr 16 15:54:47 2020 -0700 - - [Portworx] Add failure reason for cloudsnap to message - -commit c2123470a0571264651dbb5ce92228c8510fc81b -Author: Dinesh Israni -Date: Wed Apr 15 16:46:22 2020 -0700 - - For older backups missing the driver name, set the default - -commit 464474b0526c029a1576ac444b237a4aacea0a70 -Author: Dinesh Israni -Date: Wed Apr 15 16:45:41 2020 -0700 - - Remove some cattle specific annotations from namespace for migration - -commit da9194746bf813590c011a5ccf9f69cbd5379cc2 (origin/master_rulecr_update) -Author: Ram -Date: Tue Apr 14 22:06:12 2020 +0530 - - Don't start webhook controller if driver is empty - - Signed-off-by: Ram - -commit 69e5e26faf7612d7fbf2f59872cf382544759e29 -Author: Dinesh Israni -Date: Mon Apr 13 17:55:09 2020 -0700 - - [AWS] Add backoff for snapshot API in case rate limit is hit - -commit d045c5911630f2e68a4a7dc579092804235edd26 -Author: Rohit-PX -Date: Fri Apr 10 00:16:31 2020 -0700 - - Explicitly reset kubeconfig after every migration-backup loop - - Signed-off-by: Rohit-PX - -commit df8bcecf8f717824e9907712cdb7696446d9a821 -Author: Dinesh Israni -Date: Wed Apr 8 22:14:23 2020 -0700 - - [Portworx] Add backup id to spec when checking status - -commit d5cbcca0f8f1b0fb01fab1cf6934780d51193271 -Author: Dinesh Israni -Date: Wed Apr 8 22:14:03 2020 -0700 - - Mark backup as failed if preparin or uploading resources fails - -commit 6b16f8c2f4404ce339d8d6b08d68fdc82ca7db23 -Author: Rohit-PX -Date: 
Fri Apr 3 14:39:36 2020 -0700 - - Integration test for application backup of a migrated app - - Signed-off-by: Rohit-PX - -commit f4c24a7b9ea2775df9acbec8145f6d3b157e2f3c -Author: Dinesh Israni -Date: Fri Apr 3 09:33:26 2020 -0700 - - Check if schedule policy exists in cache - -commit c8d0a7460c1f7919deb3c9da05b2b83bb54bda75 -Author: Serhii Aheienko -Date: Tue Mar 3 20:58:35 2020 +0200 - - Use controller-runtime manager - - Signed-off-by: Serhii Aheienko - -commit 17aee876f1ecf1ce9d2480c6ebed561997002f38 -Author: Serhii Aheienko -Date: Tue Mar 3 20:58:12 2020 +0200 - - Update dependencies - - Signed-off-by: Serhii Aheienko - -commit f93c2405bd1e2501402b1836cd6291bb63d34d6e -Author: Dinesh Israni -Date: Tue Mar 24 17:46:00 2020 -0700 - - Vendor update for sched-ops - -commit 95db0243d6c04c47d8d096bcd12a66b57761522e -Author: Dinesh Israni -Date: Tue Mar 24 17:45:34 2020 -0700 - - Merge service accounts when applying for restore - -commit 4b0367842c5c72ea26803e66ecf1b9b5a08c71d0 -Author: Dinesh Israni -Date: Thu Mar 26 19:17:17 2020 -0700 - - Don't update LasUpdateTime in application backup if status isn't being updated - -commit ebb49b970f04ea72b92c33a26e89af1b9db67639 -Author: Dinesh Israni -Date: Tue Mar 24 18:22:57 2020 -0700 - - [Portworx] Check for annotation to skip backuplocation name check - -commit 70db754b57b25cdfd556182faa62257712aa9080 -Author: Dinesh Israni -Date: Tue Mar 24 18:22:38 2020 -0700 - - Vendor update - -commit 073c11880fe797c44573650feb123733f968145d -Author: Ram -Date: Thu Mar 26 11:31:15 2020 +0530 - - Avoid adding duplicate volume to restore status - - Signed-off-by: Ram - -commit 6e65288ed7cec2475330dc2f64a24f2ef7e2f0ba -Author: Rohit-PX -Date: Mon Mar 23 00:36:54 2020 -0700 - - Integration test for migration and reverse migraiton between two clusters - - Signed-off-by: Rohit-PX - -commit ca77db97beb9ea46d986e5c6d50694b6e1b241a9 -Author: Rohit-PX -Date: Fri Mar 13 12:25:58 2020 -0700 - - Delete app backups before backuplocation in sync controller tests - - Signed-off-by: Rohit-PX - -commit e9fc93492186051a9cac3c5a57763c2037d75d4f -Author: Dinesh Israni -Date: Mon Mar 23 16:49:25 2020 -0700 - - Add annotation to collect objects that have an owner reference - - In case of objects created by operators, might still want to collect some - objects that are only created by the operator once - -commit b680bdac79563df7708eea5a41c384465dd2b746 -Author: Dinesh Israni -Date: Fri Mar 20 17:03:04 2020 -0700 - - Update sched-ops client for all packages - -commit 20ff8ae1edb28821ce3ad9e681a28aaa08a870fb -Author: Ram -Date: Tue Mar 17 19:57:17 2020 +0530 - - [portworx] handle in-place restore for vol repl gt 2 for px driver - - - integration test check for in-place restore repl 3 vols - - Signed-off-by: Ram - -commit 4e64a43b6dd60cf8b375877cd5953b8df7f6a6d3 -Author: Rohit-PX -Date: Tue Mar 17 02:17:52 2020 -0700 - - Reduce wait time between checks for app backup - - Signed-off-by: Rohit-PX - -commit a5c3ec6de40dd218c517203b694d90b8dd93287b -Author: Rohit-PX -Date: Mon Mar 16 23:50:25 2020 -0700 - - Add parameter for secret name for cloud provider API access - - Signed-off-by: Rohit-PX - -commit 545b9ebb6f4c0aa52d494b21748fb0cb1281a6f3 -Author: Dinesh Israni -Date: Thu Mar 12 14:40:00 2020 -0700 - - Add cache for schedule policy - -commit c53bd588fd85d9a2a7f4111ff6c8658378ff44df -Author: Rohit-PX -Date: Fri Mar 13 15:28:32 2020 -0700 - - Add parameter for path in backup location - - Signed-off-by: Rohit-PX - -commit 5186ab68dc7e77a122948c378d80d7747be24720 -Author: Rohit-PX -Date: 
Wed Mar 11 16:50:13 2020 -0700 - - Do not install storks-scheduler for non-PX backups - - Signed-off-by: Rohit-PX - -commit d0c6dce72628154410d88e590b705415e613bd24 -Author: Rohit-PX -Date: Tue Mar 10 11:44:07 2020 -0700 - - Import k8s client auth, add gcloud binaries, aws authenticator in tests - - Signed-off-by: Rohit-PX - -commit b46a1fcd1c39b031fb7ecdaa5e53bc3fc60c3550 -Author: Dinesh Israni -Date: Thu Mar 5 18:40:58 2020 -0800 - - [Azure] Check if snapshot or volume is created before starting new operation - -commit 4dbda80748afd799f8109517ff70c9a491c7aa7e -Author: Dinesh Israni -Date: Thu Mar 5 16:08:18 2020 -0800 - - [GCP] Check if snapshot or volume is created before starting new operation - -commit 0286d275baaa504f67a76d04fa97f384b0cb102d -Author: Dinesh Israni -Date: Thu Mar 5 14:36:21 2020 -0800 - - Use common helper to generate tags for snapshots and volumes - -commit 0094d7bc422e4482585746d59d9c784a538fdd0a -Author: Dinesh Israni -Date: Thu Mar 5 14:20:09 2020 -0800 - - [AWS] Check if snapshot or volume is created before starting new operation - - Helps deal with crashes and when the operations are triggered but we aren't able - to store the status in the object - -commit 4e1265dc021f95885038607f2ccb31ae829d3e46 -Author: Rohit-PX -Date: Fri Mar 6 17:10:47 2020 -0800 - - Set all configmap after default config map is correctly set - - Signed-off-by: Rohit-PX - -commit dd6528ea402a823b9749741f9ce1550d05ef62de -Author: Rohit-PX -Date: Thu Mar 5 17:48:12 2020 -0800 - - Import Azure, AWS stork vol driver in test - - Signed-off-by: Rohit-PX - -commit 4b00824317d818eae8076bf6b89a8c30e9f0d295 -Author: Rohit-PX -Date: Thu Mar 5 11:17:32 2020 -0800 - - Remove vol driver from stork spec when not using PX driver - - Signed-off-by: Rohit-PX - -commit 2a6c59b2d9f6ae0dfa935dcdefc7192b5ea83a41 -Author: Dinesh Israni -Date: Tue Mar 3 18:31:49 2020 -0800 - - Add last update timestamp to backup and restore specs - -commit d027fd80a4a5e3245aa2a87446f710d354b8a009 -Author: Rohit-PX -Date: Wed Mar 4 18:49:59 2020 -0800 - - Replace volume driver in stork specs for non-PX backends - - Signed-off-by: Rohit-PX - -commit 4bc7695a549befbf83a11ebf04d19cb28311b6bc -Author: Rohit-PX -Date: Wed Mar 4 17:18:50 2020 -0800 - - Add apt-get install jq for ubuntu base images - - Signed-off-by: Rohit-PX - -commit 342869040bbce579a9ca1e2b103134ff30b8c40c -Author: Dinesh Israni -Date: Tue Feb 25 16:45:21 2020 -0800 - - Add a reason field for the overall backup and restore status - -commit 29c97961b3aa7e03f85548f05127ef6e5cae0fec -Author: Rohit-PX -Date: Tue Mar 3 18:06:08 2020 -0800 - - Integration to start app backup when another is in progress - - Signed-off-by: Rohit-PX - -commit eb919d621dba880026faca698c80f92d6399609c -Author: Rohit-PX -Date: Mon Mar 2 21:35:21 2020 -0800 - - integration test to delete backuplocation during app backup - - Signed-off-by: Rohit-PX - -commit 2ae626e5c64de5c67143a31697298f638478360e -Author: Dinesh Israni -Date: Mon Mar 2 15:31:10 2020 -0800 - - Add expected count for app backup schedule tests - -commit 68f3e25ef2827db3595dc7034da2e673c2baf1ec -Author: Dinesh Israni -Date: Mon Mar 2 15:29:01 2020 -0800 - - Vendor update for sched-ops to fix validation in test - -commit 5471a7e067ee0d5488f6e567d818d13fe23cd692 -Author: Dinesh Israni -Date: Thu Feb 27 12:43:43 2020 -0800 - - [Portworx] Add progress info for backup and restore to reason - -commit 1a0894e54ab8bfe39d40f36647cd1386efaa2517 -Author: Dinesh Israni -Date: Wed Feb 26 19:17:53 2020 -0800 - - Get the correct pluralized name of 
resources for the dynamic client - -commit b517566728495db466e05af096b01e04bb14696e -Author: Dinesh Israni -Date: Fri Feb 28 14:13:37 2020 -0800 - - Use correct type for length of backups in test - -commit 6f6efe4bf45d8ecfa10286d37c4fbb05a7671a13 -Author: Rohit-PX -Date: Thu Feb 27 18:45:23 2020 -0800 - - Vendor update for torpedo azure, aws vol drivers - - Signed-off-by: Rohit-PX - -commit 2b2eb1f09dee98fb0e0a3c35bb72ff665b8eb302 -Author: Rohit-PX -Date: Thu Feb 27 18:44:22 2020 -0800 - - Use aws, azure vol drivers from torpedo - - Signed-off-by: Rohit-PX - -commit af794e1527fca0d4370f08874dd05bf18c9780a6 -Author: Dinesh Israni -Date: Thu Feb 27 18:11:55 2020 -0800 - - Use correct API to delete backups in test - -commit abfb56253904f636b579f4c1af32407b8e66a240 -Author: Dinesh Israni -Date: Thu Feb 27 12:49:55 2020 -0800 - - Check for correct number of appplication backups in schedule test - -commit 139343270baa3cca22f1c5e8743a20a750c61ca5 -Author: Dinesh Israni -Date: Wed Feb 26 17:40:49 2020 -0800 - - Update applicationbackupschedule test for new retain policy - - Backups created by schedule aren't deleted when the schedule is deleted - -commit 58785daf81b6fe0d2bf0f018b06e4fd217af3e4f -Author: Dinesh Israni -Date: Wed Feb 19 15:55:08 2020 -0800 - - [Portworx] Don't migrate volumes which have the skipResource annotation - -commit f3228bec42288507a084d03f4a8284a755aac9c1 -Author: Dinesh Israni -Date: Wed Feb 19 16:25:42 2020 -0800 - - Cache the go cache directory in travis builds - -commit 699611ae6e52a136641f1e20650ba79f6a3fdc02 -Author: Dinesh Israni -Date: Wed Feb 19 16:18:26 2020 -0800 - - Update make in travis to start 2 jobs at once - -commit aeb8b783b9a1a475fa555eb4fc737c08f580f0ca -Author: Dinesh Israni -Date: Wed Feb 19 15:39:44 2020 -0800 - - Set defaults for the spec in migration schedule - -commit aa166f45857a8d5576eb608fac6c2432a01c3c3a -Author: Dinesh Israni -Date: Wed Feb 19 15:39:20 2020 -0800 - - Set the default reclaim policy to retain for application backup schedule - -commit 0fd730ccf20dda71264e30cb9a116861aa2aa11e -Author: Dinesh Israni -Date: Wed Feb 19 15:59:00 2020 -0800 - - Collect CronJob objects for migration, backup and clones - -commit 705e53b68b3379538b326bfc3a97d5da749ce897 -Author: Rohit-PX -Date: Mon Feb 24 23:03:31 2020 -0800 - - Vendor update for GCE torpedo driver - - Signed-off-by: Rohit-PX - -commit c8b52067ce1270643c9565d36b58390428afcbd3 -Author: Rohit-PX -Date: Mon Feb 3 17:43:44 2020 -0800 - - Ability to deploy apps on cloud platforms - - Signed-off-by: Rohit-PX - -commit 9d9feb71284a79e1d8fa0bbbda3c3ac078c8ba79 -Author: Dinesh Israni -Date: Thu Feb 20 16:04:40 2020 -0800 - - Vendor update for sched-ops - - Fixes issue in integration test while checking for status - -commit df061d0810ec583d9a4ca41d679eaa869fd893c4 -Author: Rohit-PX -Date: Tue Feb 18 19:22:09 2020 -0800 - - Scale down cassandra app after cloning - - Signed-off-by: Rohit-PX - -commit 4e8943ff3d46225f6636e3f3672655c516a6ce81 -Author: Dinesh Israni -Date: Thu Feb 13 20:41:48 2020 -0800 - - Remove initializer option from test script - -commit c975353e957a123d54acfab4bef3bbeda526e7d2 -Author: Dinesh Israni -Date: Thu Feb 13 14:44:06 2020 -0800 - - Fix events in webhookadmission controller - - Events need to be raised against an object. 
Raise it against the - deployment/statefulset/pod if we have it, otherwise raise it against the webhook - config - -commit 046dc744d6e7ec4d0a0abee6369620dc1b064a2d -Author: Dinesh Israni -Date: Fri Feb 7 19:07:39 2020 -0800 - - Update APIs for k8s 1.16 - - - Removed initializer since it has been deprecated since k8s 1.14 - - Updated sched-ops APIs - - Updated printing from storkctl since APIs had changed - -commit 67b5cf0ec29dca0f0be53f21b45277fd9502b7aa -Author: Dinesh Israni -Date: Thu Feb 6 20:02:33 2020 -0800 - - Update vendor for k8s and dependencies to 1.16.6 - -commit 350b6cb6e50c6f8bed133f921308d2ad3239e2cf -Author: Rohit-PX -Date: Mon Feb 10 22:33:37 2020 -0800 - - Fix regex to replace entire stork,stork_test image names - - Signed-off-by: Rohit-PX - -commit 5823ef060e9d14e75de976bacf4f13e418c274cc -Author: Dinesh Israni -Date: Thu Feb 13 16:53:55 2020 -0800 - - Fix duplicate tags being checked for aws driver - - Use constants to avoid typos - -commit 38e7d20a5cf56e7c71eca01d8d66da0d42220a18 -Author: Dinesh Israni -Date: Wed Feb 12 14:58:59 2020 -0800 - - [Azure] Store the resource group for volumes during snapshot - - Use it during restore so that it can be restored across resource groups - -commit 136de3ab7bac1057638fdb3a9cd71a23095d9e5f -Author: Dinesh Israni -Date: Tue Feb 11 14:00:41 2020 -0800 - - Change driver init error message to debug - -commit 323faeab5ae7319eb79ee2a83d3f192880e47514 -Author: Dinesh Israni -Date: Tue Feb 11 14:59:28 2020 -0800 - - Remove unused status types - -commit c0244eb623c67d3f6a858d44119de2630a7f3c0b -Author: Arthur Lutz -Date: Tue Feb 11 12:50:38 2020 +0100 - - [README] typo fix - -commit 2a14501ef2b758af297d3366222425d950ec31ae -Author: Dinesh Israni -Date: Thu Feb 6 00:25:45 2020 -0800 - - Init the schedule package even if driver isn't specified - -commit 977f8cbb26b5e5a4a8576023dfc5f3fedba637f6 -Author: Dinesh Israni -Date: Thu Dec 12 16:31:00 2019 -0800 - - Implementation for AWS driver - - Added support for ApplicationBackup and ApplicationRestore APIs - -commit 63b71c459fddc854ddce4e1d1c713e9c831f39eb -Author: Dinesh Israni -Date: Thu Dec 12 16:29:22 2019 -0800 - - Vendor update for aws - -commit 241c1b39f4af7582a67adbde916653c00c3b19a6 -Author: Dinesh Israni -Date: Tue Jan 21 16:47:32 2020 -0800 - - Don't need to prepare PV when doing backup - -commit 8630944976dc472d05beaba7778a1df4d62c553a -Author: Dinesh Israni -Date: Tue Jan 21 16:47:12 2020 -0800 - - Implementation for Azure driver - - Added support for ApplicationBackup and ApplicationRestore APIs - -commit 7c3a29584a43b60e091564936543817dbf2d2aa1 -Author: Dinesh Israni -Date: Fri Dec 6 17:19:11 2019 -0800 - - Vendor update for azure dependencies - -commit 29a157cdd125c9b9f7b0b3d2457f431beb34c096 -Author: Dinesh Israni -Date: Wed Jan 29 17:49:14 2020 -0800 - - Add support for gcp multi zone disks - - Also use zone from disk when creating snapshot and disk instead of the zone - where stork is running - -commit d55b799989020c56a5861887beb7ab8a1b6346ca -Author: Rohit-PX -Date: Wed Feb 5 08:49:35 2020 -0800 - - Increase wait time for synced backups to appear on the destination cluster - - Signed-off-by: Rohit-PX - -commit 5b5621941971bc65d64c8a802174aae723fc22de -Author: Serhii Aheienko -Date: Thu Jan 30 13:35:18 2020 +0200 - - Add Data export api definition - - Signed-off-by: Serhii Aheienko - -commit dae7f407ffe028c0f156776b37c98cd453593ff3 -Author: Dinesh Israni -Date: Fri Jan 24 13:23:07 2020 -0800 - - Use correct namespace when checking status of adminClusterPair - -commit 
d1f7095fb037600bb718e5ecaa0a6df5ab1d63f1 -Author: Ram -Date: Tue Jan 21 14:40:25 2020 +0530 - - vendor update for sched-ops - - Signed-off-by: Ram - -commit 258723fd0feed24664be867139f19e80d0a1d280 -Author: Ram -Date: Mon Jan 20 22:14:13 2020 +0530 - - get stork namespace from env for webhook - - generate CN using namespace in env - - log server failure errors - - Signed-off-by: Ram - -commit 2db938fa528ac8fa7cc98030ebd8157180eacd44 -Author: Ram -Date: Thu Jan 9 19:50:29 2020 +0530 - - generate self signed certificates for mutate webhook - admission controller - - sched-ops vendor updates - - Signed-off-by: Ram - -commit 09f2784315ea29a0ff2e8484b0d255526e0451dc -Author: Ram -Date: Fri Dec 27 12:15:17 2019 +0530 - - add mutating webhook controller to stork - - start webhook server on 443 - - add listner for mutate admission - - verify if deployment/ss using px volume and update - scheduler to stork - - update stork specs with webhook admission configuration - - fix errcheck warnings - handle pod scheduler update for webhook controller - - nil check for non persistentclaim source - - allow default app deployment - - update scheduler path to include pod spec path - - Signed-off-by: Ram - -commit 87997a219d1746440a617477265f712156a98687 -Author: Dinesh Israni -Date: Fri Jan 24 13:25:02 2020 -0800 - - Revert "Use correct namespace when checking status of adminClusterPair" - - This reverts commit c963f03b29357fd3326afb474b8f8d4ddf5a3533. - -commit c963f03b29357fd3326afb474b8f8d4ddf5a3533 -Author: Dinesh Israni -Date: Fri Jan 24 13:23:07 2020 -0800 - - Use correct namespace when checking status of adminClusterPair - -commit fba23c30430f40f7a99bdec81d94d67f90aeedf4 -Author: Dinesh Israni -Date: Fri Jan 3 02:21:21 2020 -0800 - - Remove stale generated files - -commit 2a3c6a17d572970d683adecd819a953c5e7f093c -Author: Dinesh Israni -Date: Fri Dec 13 13:52:16 2019 -0800 - - Update start up parameters - - Start the application manager controllers even if no driver is specified - -commit 8748a0f36bd807dcce63089e211dbd6eaf2e0360 -Author: Dinesh Israni -Date: Fri Dec 6 17:19:11 2019 -0800 - - Vendor update - -commit 25d286f112b8c57b38737be311da23b314329376 -Author: Dinesh Israni -Date: Sat Oct 19 01:30:24 2019 -0700 - - GCE driver implementation - - Can be used to take snapshots of GCE PDs and restore them to PVCs - -commit 82217b2fa71de790b20f831b162fbbe010ec1f56 -Author: Dinesh Israni -Date: Thu Oct 17 22:38:43 2019 -0700 - - Changes for app backup/restore to work with multiple drivers - - - When taking a backup we will now look through PVCs in the namespace for all - supported drivers and try to create a backup - - The driver name will be stored in the backup object so that we know which - driver to use when doing the restore - - This also allows us to create a backup with multiple volume drivers in the - same backup - - Added an options field in the application backup CRD which can be used to - store info like projectID, zone, etc for cloud drives - -commit 44b7af4da13149fe8da7dbe31d31e1c22b8ffef9 -Author: Rohit-PX -Date: Sun Jan 19 23:00:46 2020 -0800 - - Increase wait time for backups to be synced - - Signed-off-by: Rohit-PX - -commit ef10d727b91499774ab383e12f02b326f26a03ed -Author: Dinesh Israni -Date: Fri Jan 17 12:06:26 2020 -0800 - - Disabling unittest because of bug in fake client - -commit 23da4bc8555004684b6dd1ff76eca184903ca94f -Author: Dinesh Israni -Date: Thu Jan 16 18:50:55 2020 -0800 - - Add activate/deactive migration support for ibp objects - -commit 
fa8da1871df18f980582a99b83fe0c337185623f -Author: Dinesh Israni -Date: Thu Jan 16 18:50:33 2020 -0800 - - Vendor update - -commit 7dc3df13f98522aa1a0604e0223877f9ab6c8c4c -Author: Dinesh Israni -Date: Wed Oct 9 17:15:01 2019 -0700 - - Add support for some CRDs to resourcecollector - -commit fd149960d627014354ad233a0675cc9c3e0e0ec1 -Author: Grant Griffiths -Date: Fri Dec 13 14:15:09 2019 -0800 - - Add stork secrets implementation based on lsecrets.Instance() - - Signed-off-by: Grant Griffiths - -commit 4ac2b39d8e86570b99842ddb1eee7c3205df8dfe -Author: Rohit-PX -Date: Wed Nov 13 00:42:23 2019 -0800 - - Scaled integration test for app backup - - Signed-off-by: Rohit-PX - -commit 54645c25047d6a27e15777c2f0f3ed014ac35739 -Author: Ram -Date: Wed Dec 4 13:26:43 2019 +0530 - - fix staticcheck errors - - Signed-off-by: Ram - -commit f92709c17b81de61747e17d0b7ef6b28c605c68e -Author: Ram -Date: Wed Dec 4 12:34:13 2019 +0530 - - [portworx] bump px version for in place restore feature - - Signed-off-by: Ram - -commit fb8f8cb4452d8235a511516e312bb1c692cb9835 -Author: Ram -Date: Fri Nov 22 21:00:59 2019 +0530 - - add integration test for migration cleanup feature - - Signed-off-by: Ram - -commit 8026c7c36869492b5940cc8934c0ef1d7bf3f6e2 -Author: Ram -Date: Tue Nov 26 19:18:17 2019 +0530 - - Move destination resource collection to migration controller - - use NewInstance() instead of changing singleton k8s instance in - resource collector - - collect old resources to purge inside migration controller instead - of resourcecollector pkg - - change migration cleanup status to migration purge - - collect resource from destination cluster using new resource - collector - - Signed-off-by: Ram - -commit ef30674a7ccafe91fd4f7a7b2392ef4f96dac9ea -Author: Ram -Date: Mon Nov 18 23:55:35 2019 +0530 - - Cleanup migrated resources from destination cluster - - modified GetResources() to accept cluster config to fetch resource from - - only fetch resources which has migration annotation - - find and delete cleaned up resources from dest cluster - - Signed-off-by: Ram - -commit df796e6cf246fff637588339d7b20aeafc5f865e -Author: Ram -Date: Tue Nov 12 23:56:30 2019 +0530 - - Add CleanupResources flag to migration specs - - add annotation to migrating resources by stork - - have stub to cleanup resources for dest cluster - - Signed-off-by: Ram - -commit a1da847c31d14ea0400c33c3392c1acd64b938de -Author: Ram -Date: Tue Nov 26 10:26:56 2019 +0530 - - Wait for volume restore to succeed - - Signed-off-by: Ram - -commit 5ad0ac664eaadb93953c6b28e1740a4c4edb9928 -Author: Dinesh Israni -Date: Fri Nov 15 18:34:42 2019 -0800 - - Rename stage in VolumeSnapshotRestore - -commit e1dce3b483660f487c2d9038e3846a91e9482528 -Author: Dinesh Israni -Date: Thu Nov 14 16:56:37 2019 -0800 - - Increase timeout for backup test - -commit c762b61282ce760618d5c2b6a30ba22d1d5e701d -Author: Dinesh Israni -Date: Tue Nov 12 23:00:29 2019 -0800 - - Remove call to verifySnapshot for cloudsnap test - -commit 27d8b1ec0ac530b94524eeb9463734164f21b1ed -Author: Rohit-PX -Date: Mon Nov 11 14:46:58 2019 -0800 - - Verify snapshot instead of groupsnapshot - - Signed-off-by: Rohit-PX - -commit e10b370262ef1ff1231525956a17cc020aea4013 -Author: Rohit-PX -Date: Mon Nov 11 13:17:54 2019 -0800 - - Wait for snapshot to complete in in-place restore tests - - Signed-off-by: Rohit-PX - -commit 8e91de91cbfe9118bfc451b770c5c01703ce827c -Author: Ram -Date: Fri Nov 8 08:28:23 2019 -0800 - - [Portworx] fixes groupcsrestore feature - - - correct error msg for restore failure - - pass 
only one pool while doing haupdate - - add debugging for correct poolids - - Signed-off-by: Ram - -commit 77285813a5a3442b2a09f51bdb0244b6be689b1d -Author: Rohit-PX -Date: Thu Nov 7 11:18:45 2019 -0800 - - Ability to skip restore tests even when running individually. - - Signed-off-by: Rohit-PX - -commit 589f83239dfe6690868ee909ac0d56f11a897d55 -Author: Dinesh Israni -Date: Wed Oct 30 17:28:41 2019 -0700 - - Updates for storkctl - - - Add missing backupName when creating ApplicationRestore - - Add replacePolicy param when creating ApplicationClone and ApplicationRestore - - Fix alias clash for VolumeSnapshotRestore and ApplicationRestore - -commit 64e6b90ad5b6bacec47400c2ee47e1282b8d7bfa -Author: Rohit-PX -Date: Wed Nov 6 16:20:25 2019 -0800 - - Add vendor updates for auth groupvolumesnapshot. - - Signed-off-by: Rohit-PX - -commit fde1fdca2bd6d87b55187d5101a37bb5feedb59b -Author: Rohit-PX -Date: Wed Nov 6 00:15:05 2019 -0800 - - Add test for group cloudsnap restore, flag to skip tests. - - Signed-off-by: Rohit-PX - -commit 02f776fcdd7db501a0309b8a182b222bc3df8821 -Author: Ram -Date: Tue Nov 5 09:02:49 2019 -0800 - - pass poolids to Nodeid for ha update api - - Signed-off-by: Ram - -commit efcaadf1e8ecc80a73a9b1385d4559393d7faaf3 -Author: Rohit-PX -Date: Mon Nov 4 16:48:21 2019 -0800 - - Parameterize stork volume driver for integration tests. - - Signed-off-by: Rohit-PX - -commit 82e4959488320b4f145f8af50dde37ce4c4d9add -Author: Ram -Date: Mon Nov 4 02:22:25 2019 -0800 - - enable cs inplace restore integration tests - - Signed-off-by: Ram - -commit dc4974e1cb9ac0abeeb7e90f8bae038fbd02bd1e -Author: Ram -Date: Thu Oct 31 12:15:40 2019 +0530 - - update ha increase api nodeid to poolids - - update gettoken api with vendor changes - - Signed-off-by: Ram - -commit 12d6d093f6895e9088f230d47ba6ced9fd2aba58 -Author: Ram -Date: Thu Oct 31 12:13:29 2019 +0530 - - vendor update openstorage/release-7.0 - - Signed-off-by: Ram - -commit b2844ea9c522ed824f70c82e1276e2b672db7e9b -Author: Rohit-PX -Date: Fri Nov 1 16:43:32 2019 -0700 - - Remove sleep which is not required. - - Signed-off-by: Rohit-PX - -commit 5969ad9bac19eed82915e0ff3daa380a5ea5495f -Author: Rohit-PX -Date: Fri Nov 1 12:32:42 2019 -0700 - - If stork_test is running, tail pod logs. - - Signed-off-by: Rohit-PX - -commit 3b305535b27f177d209dce3e4f2554ea1388c550 -Author: Rohit-PX -Date: Thu Oct 24 13:46:28 2019 -0700 - - Add API to wait for backup completion in integration tests. - - Signed-off-by: Rohit-PX - -commit b4315735cda93292155d9f99b4e56743ee9a94b6 -Author: Dinesh Israni -Date: Wed Oct 30 15:39:21 2019 -0700 - - Update google sdk and some python packages in container - -commit 0edbe2ce1c72172aab585ba6baf2008eba1b750d -Author: Rohit-PX -Date: Tue Oct 29 12:38:12 2019 -0700 - - Vendor update for torpedo. - - Signed-off-by: Rohit-PX - -commit c1127c65a863a8530e0a288f536a113fef01f983 -Author: Rohit-PX -Date: Mon May 6 11:45:40 2019 -0700 - - Add ability to run in auth-enabled environment. - - Signed-off-by: Rohit-PX - -commit 17aa6d2fcc4a5dd9c7dfa446b3fc2ddcfd3f7fbd -Author: Rohit-PX -Date: Mon Oct 28 13:49:14 2019 -0700 - - Add selector labels to stork daemonset and cassandra example. - - Signed-off-by: Rohit-PX - -commit ecdc6c2125b0df5ab04c60bdf3e6736bd59ccef1 -Author: Rohit-PX -Date: Mon Oct 28 12:34:03 2019 -0700 - - Fix selector label. - - Signed-off-by: Rohit-PX - -commit 183987621be9e1f2c5a8556ece6cba89034fb6f5 -Author: Rohit-PX -Date: Mon Oct 28 11:52:49 2019 -0700 - - Add selector to spec. 
- - Signed-off-by: Rohit-PX - -commit deff958975694c7f3769a968cde0a76f6a5c42f7 -Author: Rohit-PX -Date: Mon Oct 28 11:20:16 2019 -0700 - - Update API version to v1. - - Signed-off-by: Rohit-PX - -commit 35060e5a26d0dddbd53bef5b8c30f80e803e0a23 -Author: Dinesh Israni -Date: Fri Oct 25 16:00:39 2019 -0700 - - Update spec version for deployments - - Also add csinodes permission for scheduler clusterrole, - required for k8s 1.16 onwards - -commit c938095c7e5aa3fae294af24b167e67f552c460c -Author: Rohit-PX -Date: Mon Oct 14 01:36:27 2019 -0700 - - Migration scale test. - - Signed-off-by: Rohit-PX - -commit c232cfedb42d2bccd62dbb9d58496ccaa039dce0 -Author: Rohit-PX -Date: Mon Oct 21 18:21:22 2019 -0700 - - Update torpedo vendor. - - Signed-off-by: Rohit-PX - -commit 0d54b386a278b6e20402d9cdf125bfb5f3216207 -Author: Dinesh Israni -Date: Sat Oct 19 01:31:08 2019 -0700 - - Update test flag parsing which was broken in go 1.13 - -commit e340d820e733cdaba3a2e9b3ee690bb651345b8f -Author: Dinesh Israni -Date: Thu Oct 17 22:46:19 2019 -0700 - - Update go version in travis to 1.13.1 - -commit 66ea6ffe433cdd17e375439e35983f322fc0afc4 -Author: Dinesh Israni -Date: Thu Oct 17 22:45:56 2019 -0700 - - Vendor update - -commit c046355acd59894ef543c4be9d7881891d6775e3 -Author: Dinesh Israni -Date: Wed Oct 9 18:04:50 2019 -0700 - - Groupsnapshot: If objects exist reuse them if they are the same - - Else delete and re-create them - -commit e3114609abac69b49174f2ae0d6fed87359bf783 -Author: Harsh Desai -Date: Sun Oct 13 10:00:07 2019 -0700 - - update vendor to use sched-ops kubernetes-1.11 - - Signed-off-by: Harsh Desai - -commit cc2e9f96a038bba5e533f9213172ccab12b0b52e -Author: Dinesh Israni -Date: Wed Oct 9 18:05:59 2019 -0700 - - Bump version on master to 2.4.0 - -commit 6a3497c42b2a12fd5b6337a2b81d760b91cbb5f4 -Author: Rohit-PX -Date: Wed Oct 9 12:14:54 2019 -0700 - - Fix error with creation of stock-mock-time. 
- - Signed-off-by: Rohit-PX - -commit 283dfa2d646afcdbbdb59e69614169b3697f575f -Author: Dinesh Israni -Date: Wed Oct 9 01:25:35 2019 -0700 - - Use created config map for watch on mock time - - Previously if the mock time config map was not created we would end up using the - empty config map to start the watch - -commit fb628de7dcfb6ccfe0c92ed4ba4bcd7b9341c3dd -Author: Dinesh Israni -Date: Fri Oct 4 19:28:22 2019 -0700 - - Add checks for some resources when collecting them - - - Secrets: Ignore autocreated secrets with well known prefix names - - ServiceAccounts: Ignore autocreated service accounts unless they have - non-default image pull secrets configured - - Ingress: Only collect namespaced scoped objects - - Role/RoleBinding: Ignore autocreated objects starting with "system:" - -commit f8ae20dc8570259da875992298e8a450bb6fee50 -Author: Dinesh Israni -Date: Fri Oct 4 19:27:16 2019 -0700 - - Fix retry for Unauthorized errors when migrating resources - -commit 05b615774d3b9b9f351056f0cc59a6d66a31a300 -Author: Dinesh Israni -Date: Thu Sep 26 17:08:26 2019 -0700 - - Some optimizations for migration - - - Don't cancel migrations that are in Final stage - - Don't update migration schedule after prune if the object wasn't updated - -commit 10c3e3e72bb5e0511beed1d3265117265f188791 -Author: Dinesh Israni -Date: Sat Sep 28 14:18:23 2019 -0700 - - Ignore PVCs that are being deleted during backup - -commit 0362b76554980d2f3c5e25516c0d30b0c6141bc4 -Author: Dinesh Israni -Date: Sat Sep 28 14:07:13 2019 -0700 - - Fix termination of backoff for cancelling backups - -commit 85080c7e6bca8b08ac05c9092c6917a41620aed5 -Author: Dinesh Israni -Date: Fri Sep 27 17:09:49 2019 -0700 - - Add the old parameter for cluster-admin-namespace - - Added the message that it is deprecated - -commit fa4dd6972783a9e99394ef4f431d5693e98b5943 -Author: Aditya Dani -Date: Thu Sep 26 18:40:26 2019 -0700 - - Cluster domain tests: Wait till scale factor reduces to 0 before failover. - -commit d260c19a2f888a13cecb7ab75676e165c47df6f2 -Author: Rohit-PX -Date: Thu Sep 26 14:21:14 2019 -0700 - - Remove incorrect check for reclaim policy. - - Signed-off-by: Rohit-PX - -commit 1ceddf079b835d0d7706d8b728e2a39dc987b469 -Author: Rohit-PX -Date: Tue Sep 24 00:59:35 2019 -0700 - - Backup to and restore from S3, Azure, Google. - - Signed-off-by: Rohit-PX - -commit 599aea4030e917386351da19d7c21ebeb0e65f70 -Author: Dinesh Israni -Date: Tue Aug 13 17:48:04 2019 -0700 - - Cancel and fail backup if starting volume backup fails - -commit cfb91a641e74963c829927ca003db28412830933 -Author: Dinesh Israni -Date: Wed Sep 25 15:45:22 2019 -0700 - - Revert "Update travis for 2.3 branch" - - This reverts commit db3ec66dba91bc6ca150ce900953c093b9a52014. - -commit 7b52a5e7fa4573b48a1859d16ba4f36c3b4bb821 -Author: Aditya Dani -Date: Tue Sep 24 16:25:50 2019 -0700 - - Check the scale factor of the application after scaling it down. - -commit db3ec66dba91bc6ca150ce900953c093b9a52014 -Author: Dinesh Israni -Date: Wed Jan 30 19:23:58 2019 -0800 - - Update travis for 2.3 branch - -commit 252f6145c4a665c062bb846c152a68d6d8e4b196 -Author: Dinesh Israni -Date: Tue Sep 24 18:12:38 2019 -0700 - - Fix race in migration schedule integration test - - The prune for migrations might still be in progress, so add retries - -commit 8a05f413d630ac7ee240ddd4d7f17b38f6685245 -Author: Rohit-PX -Date: Mon Sep 9 17:42:59 2019 -0700 - - Add migration test with startapp flag set to false. 
- - Signed-off-by: Rohit-PX - -commit e7a053a135d33a809aef2ce5bf1cbad2a940379e -Author: Ram -Date: Mon Sep 23 23:54:49 2019 +0530 - - update UT's to additionally check for volume count for snapRestore - - Signed-off-by: Ram - -commit 75e1685a38c35f699f148ce523b9f9bb9b25f331 -Author: Ram -Date: Mon Sep 23 23:54:15 2019 +0530 - - vendor update portworx/sched-ops - - Signed-off-by: Ram - -commit 030eba580b3244d9078c0aa0b9f4c5b804860b1d -Author: Ram -Date: Mon Sep 16 20:52:30 2019 +0530 - - Correct volume count for storkctl snaprestore output - - remove unnecessary logs - Signed-off-by: Ram - -commit c27038598439486e23f6ca7bba3b82876e82e55e -Author: Dinesh Israni -Date: Fri Sep 20 15:57:10 2019 -0700 - - Don't migrate default rbac objects - -commit 01815b26d3054175803c6d84ffc747b90866abf8 -Author: Dinesh Israni -Date: Thu Sep 19 17:44:54 2019 -0700 - - Retry migrations for Unauthorized errors - - The kube apiserver can sometimes return Unauthorized when running on the cloud - if there are temporary auth errors - -commit ae892f9156bdbb5bad9c96b62f7684f0fd1b9660 -Author: Dinesh Israni -Date: Fri Sep 20 16:14:43 2019 -0700 - - Update the number of workers to 10 for each controller - - Will make if configurable for each controller in the future - -commit 1d6146f1317ff5ab05f2773dccad82299cbbda7d -Author: Dinesh Israni -Date: Thu Sep 19 11:05:44 2019 -0700 - - Vendor update for snapshotter - - Adds cache when listing volumesnapshotdata instead of reading from - kube apiserver everytime - -commit fee9c8f36b864053218d92a1bb95fb701508acd7 -Author: Ram -Date: Sat Sep 21 09:56:52 2019 +0530 - - detect snapshot type while cleaning up restore objects - in volumesnapshotrestore - - Signed-off-by: Ram - -commit a3d5ce729068f35dc869692b7a7568fb40544d22 -Author: Ram -Date: Fri Sep 20 23:32:01 2019 +0530 - - update torpedo api changes to integration-test - - Signed-off-by: Ram - -commit aa7bd73051250f5fb0ad2898edea339d2493a474 -Author: Ram -Date: Fri Sep 20 05:09:11 2019 -0700 - - Vendor update torpedo - - Signed-off-by: Ram - -commit 787a5ed469c69314befd7dcd9c7c8a45269cefb4 -Author: Ram -Date: Fri Sep 20 17:11:32 2019 +0530 - - replace maps for restore volumeinfo in crd - - use pvc,namespace filed to extract volume information - - clean snapshot in-place restore for each volume - - show restore status for each volume - - Signed-off-by: Ram - -commit 8ede2852f88dc462e98afc0ed9852c9c977b8c84 -Author: Ram -Date: Fri Sep 20 01:08:39 2019 +0530 - - store pvc name and namespace mapping instead of whole pvcspec - - Signed-off-by: Ram - -commit 4fa4511244de4ed413160adf9022510cbf031d68 -Author: Ram -Date: Fri Sep 20 01:07:38 2019 +0530 - - add detail volume info snaprestore crds - - Signed-off-by: Ram - -commit c9e7e76b061d373818e077b73dd95f513744ae37 -Author: Grant Griffiths -Date: Fri Sep 20 15:36:14 2019 -0700 - - Check ownership of VolumeAttachment before deleting - - Signed-off-by: Grant Griffiths - -commit 9e044d720820005da7289c035b674b3ebe11bb82 -Author: Rohit-PX -Date: Fri Sep 20 14:45:34 2019 -0700 - - Revert changes made for testing. - - Signed-off-by: Rohit-PX - -commit 14e782fc2c7049e7255cdcbcc995bde4e9d4e72c -Author: Rohit-PX -Date: Fri Sep 20 14:25:43 2019 -0700 - - Use scale factor from destination cluster to scale source in failback. - - Signed-off-by: Rohit-PX - -commit 302785e0af3836fc691b0497c71492940422c902 -Author: Rohit-PX -Date: Thu Sep 19 13:27:14 2019 -0700 - - Set old scale factor in failback test. 
- - Signed-off-by: Rohit-PX - -commit 545fc4666da109ca557a9ab3e97691f5d895daea -Author: Grant Griffiths -Date: Fri Sep 20 12:56:20 2019 -0700 - - Delete VolumeAttachments for down node or pod in unknown state - - Signed-off-by: Grant Griffiths - -commit 01ec37438733c3fbfeb211301567fc168d26549a -Author: Grant Griffiths -Date: Fri Sep 20 11:38:20 2019 -0700 - - Update vendor - - Signed-off-by: Grant Griffiths - -commit 3a9a677b79598b0743789b65410c87eeaa7b14bf -Author: Luis Pabón -Date: Wed Sep 18 17:49:31 2019 -0700 - - Fix CSI unstructured object access - -commit a6c426c7187b0b74e0f880cd084734eb9ac15728 -Author: Rohit-PX -Date: Thu Sep 19 22:38:21 2019 -0700 - - Reset config to source after backup sync controller test. - - Signed-off-by: Rohit-PX - -commit f2d0a9436341ec357fcb74812ddcf8f53ff097da -Author: Rohit-PX -Date: Thu Sep 19 14:02:30 2019 -0700 - - Order snapshot tests to run before migration tests. - - Signed-off-by: Rohit-PX - -commit 4757bf0faa5ca18a825a2191a23259740b0d64e8 -Author: Dinesh Israni -Date: Tue Sep 17 23:29:11 2019 -0700 - - Create annotations for app during migration if it doesn't exist - - Also remove noisy log message - -commit 4c87ec9e5c7d82ceb757fbc0d8fdecdff32d30bf -Author: Rohit-PX -Date: Tue Sep 17 11:27:59 2019 -0700 - - Use cassandra instead of mysql for clusterdomain migration. - - Signed-off-by: Rohit-PX - -commit 4241f55a4914edabdb06034687faf4d46afee503 -Author: Rohit-PX -Date: Thu Sep 12 17:32:39 2019 -0700 - - Allow running of individual tests. - - Signed-off-by: Rohit-PX - -commit be7040a0e3c8efdd52cce0d99ba3a432740387c8 -Author: Rohit-PX -Date: Mon Sep 16 17:56:13 2019 -0700 - - Use name from the existing secret object as new obj might be nil. - - Signed-off-by: Rohit-PX - -commit 0d9a17c9366915f0ff7da31fdc43c84dfad01146 -Author: Dinesh Israni -Date: Thu Sep 12 17:59:32 2019 -0700 - - Update API group for stork-scheduler permission for replicaset - -commit a56395cd9c555c930029868a9a22fdc618967356 -Author: Ram -Date: Wed Sep 11 12:26:21 2019 +0530 - - Don't add duplicate entry for already present imagepullSecrets - - Signed-off-by: Ram - -commit 7a2c14100a8b0049bc65a7611561613f1b657977 -Author: Ram -Date: Tue Sep 10 16:45:42 2019 +0530 - - Migrate image pull secrets associated with default service account - - Signed-off-by: Ram - -commit d7624f2f4e0d1da8cf229ef4327ffe1bb7aaf223 -Author: Dinesh Israni -Date: Wed Sep 11 17:02:45 2019 -0700 - - Don't set predicates or priorities for scheduler - - It picks up the defaults from k8s 1.10 onwards - -commit dad6473aab94a2b037e3ce4d70b32e3df4337c51 -Author: Dinesh Israni -Date: Wed Sep 4 15:10:10 2019 -0700 - - Create namespaces for restore if they don't already exist - -commit 69c001906756bf7dc5e34e22463e1fa509ade550 -Author: Dinesh Israni -Date: Wed Sep 4 15:02:00 2019 -0700 - - Remove owner ref from synced backups - - This was causing backups to be deleted by the k8s because the owner, - which is the backup schedule, won't be present on remote cluster - -commit c46f6dfa27d3a613deccb9e7bbfb7c3a5cf3421e -Author: Dinesh Israni -Date: Wed Aug 14 22:12:39 2019 -0700 - - Add check in monitor to skip duplicate offline nodes with same IP - - It is possible for the storage driver to return information for 2 nodes with the - same IP if a new node was re-using the IP from a node that was removed. - In that case the health monitor would incorrectly determine that the storage - driver was offline on the node. 
- - This change removes offline nodes with duplicate IPs - -commit e5ea8255801a5852cee711ee7c668902689a438b -Author: Dinesh Israni -Date: Wed Aug 28 19:27:58 2019 -0700 - - Convert reclaim policy to string before setting in unstructured object - - Can't set custom types using SetNestedField - -commit c39cbb36199212065a9cb5b8d13391ca5a0b6488 -Author: Dinesh Israni -Date: Tue Sep 3 15:49:46 2019 -0700 - - Create namespace mapping map in application restore if not present - -commit 54a430952512a9832831723f211cb1fe3bcdb964 -Author: Rohit-PX -Date: Thu Aug 22 18:45:43 2019 -0700 - - Integration test for backup sync controller. - - Signed-off-by: Rohit-PX - -commit 4a0e6d84471783bdca391ab887cc8c24eeeec60c -Author: Aditya Dani -Date: Tue Aug 27 12:57:48 2019 -0700 - - Move the ClusterDomainsStatus.Info check within the retry task. - - - Stork will create the CDS object but it might not update the Info object - until it gets it back from the storage provider. - -commit 3224f511a5932974a793d9df5637eb122cdb6252 -Author: Rohit-PX -Date: Mon Aug 26 17:57:54 2019 -0700 - - Increase wait time when waiting for cluster domain list. - - Signed-off-by: Rohit-PX - -commit fce3389cc68a1893ac96daac6358bf43a4978ea6 -Author: Aditya Dani -Date: Sun Aug 25 23:13:10 2019 -0700 - - Cluster Domain Integration Tests - - - Add a task wait and retry over ListClusterDomainStatus call - before concluding that the tests are not running in cluster domains - environment. - -commit faea830df8aac52591087439e01f1242cffb0d45 -Author: Dinesh Israni -Date: Sat Aug 24 22:21:13 2019 -0700 - - Update go version for travis build - -commit ca799b78d7fce4825107a78b6f11918dd3240cf3 -Author: Rohit-PX -Date: Fri Aug 23 16:10:49 2019 -0700 - - Add quotes to the enable cluster domain flag. - - Signed-off-by: Rohit-PX - -commit bc0e9926fd84d65fe0cfc91e423a22f30c419dba -Author: Rohit-PX -Date: Thu Aug 22 18:49:08 2019 -0700 - - Fix flag for enabling cluster domain tests in integration tests. - - Signed-off-by: Rohit-PX - -commit 53099ddfc07741866ff1580028bf5bf9c3ef1541 -Author: Rohit-PX -Date: Tue Aug 20 16:33:21 2019 -0700 - - Add 'nil' param to ListNamespaces sched-ops method invocation. - - Signed-off-by: Rohit-PX - -commit 65127137a6ecbeefcd5dc355ff0b261b3e4d5017 -Author: Rohit-PX -Date: Tue Aug 20 16:17:00 2019 -0700 - - Vendor Updates. - - Signed-off-by: Rohit-PX - -commit 9a8e7f56dd1813d1e195a211f69c92b32cc00bb8 -Author: Rohit-PX -Date: Mon Aug 19 23:03:18 2019 -0700 - - Add storage provisioner flag to stork-test. 
- - Signed-off-by: Rohit-PX - -commit a2450e756afef7868435fd0726e0ae577036f442 -Author: Dinesh Israni -Date: Fri Aug 16 15:37:29 2019 -0700 - - Save the clone volume names after generating so that we can use them on failure - -commit 977657fd2b882139af3aa3a03b57e955a5389d7b -Author: Dinesh Israni -Date: Fri Aug 16 15:36:27 2019 -0700 - - [Portworx] Delete created volume clones on failures - - This ensure that all volumes are created together when retried - -commit 18ccdd37258bbf0c02a26df1ce387d6b76973938 -Author: Dinesh Israni -Date: Tue Aug 13 17:52:16 2019 -0700 - - Remove check for error when refreshing discovery helper - - The library takes care of the error - -commit 961ffa6085e6272afb5365b5caefd816fb976855 -Author: Dinesh Israni -Date: Tue Aug 13 17:46:56 2019 -0700 - - Replace collections helper with modifying objects directly - -commit 49e5c63fcaa6477679ac23786744a3a7c4292a5b -Author: Dinesh Israni -Date: Tue Aug 13 16:42:12 2019 -0700 - - Vendor update - - Update k8s pacakges to 1.11.9 - -commit 974e7a2865b7cb944d9d2da3e874751fb2d84b77 -Author: Rohit-PX -Date: Fri Aug 16 17:05:52 2019 -0700 - - Integration test for label selector. - - Signed-off-by: Rohit-PX - -commit 63408fd629ad1bdab069c13b43d7d7e20919d843 -Author: Ram -Date: Wed Aug 7 22:38:46 2019 +0530 - - Address review comments - - Signed-off-by: Ram - -commit 391e8e24dfb7558f45a1e61395b18e9ac709b977 -Author: Ram -Date: Wed Aug 7 01:40:44 2019 +0530 - - Cleanup restore objects upon CRD delete - - fix restore fails when haUpdate fails for restore vol - - make task add more unique - - Signed-off-by: Ram - -commit ddb55b7ad3aaee28308a7f9cb8416f6e4053554e -Author: Dinesh Israni -Date: Fri Aug 16 23:24:45 2019 -0700 - - Update cassandra version in integration test - - The older version has a bug which could lead to an empty commit log file - which causes issues during restart - -commit 8dff2de6e1abd01319f8469c1b5910910d4c2aa0 -Author: Dinesh Israni -Date: Thu Aug 15 13:07:07 2019 -0700 - - Vendor update for torpedo - - Fixes test issue when creating rules - -commit 2e863970a599d0380243f6bce2cca972c26776d6 -Author: Rohit-PX -Date: Wed Aug 14 17:29:38 2019 -0700 - - Integration tests for pre/post failing rules and spec files. - - Signed-off-by: Rohit-PX - -commit 6ca355dfdc77ae87a56d3cea78b44bf169110a0b -Author: Dinesh Israni -Date: Sat Aug 10 13:56:23 2019 -0700 - - Update version for Deployment and Statefulset - -commit 25a49dfd0136f64a0bca51bbf917fa07bdf6bf47 -Author: Dinesh Israni -Date: Sat Aug 10 13:55:49 2019 -0700 - - Vendor update for sched-ops and torpedo - -commit 168aab918faad8e4c1f6ef78b514ca81ed7c8f75 -Author: Dinesh Israni -Date: Sat Aug 10 13:42:23 2019 -0700 - - Pass in correct namespace when running pre/post exec rules for clone - -commit cf051db8390340eadcfa4d34426f0432ce033176 -Author: Dinesh Israni -Date: Thu Aug 8 23:31:33 2019 -0700 - - Integration tests for application clone with rules and label selectors - -commit fcf8665149b31784cf2927d77f3b9dbb019469aa -Author: Rohit-PX -Date: Sun Aug 11 18:23:57 2019 -0700 - - Integration test for application backup with pre/post exec and missing rule. 
- - Signed-off-by: Rohit-PX - -commit 4df16d5f9f05fa34846c06af7a8864832c4f26c9 -Author: Ram -Date: Mon Aug 12 19:20:14 2019 +0530 - - Disable CS inplace restore feature - - Signed-off-by: Ram - -commit cf19c5cf197d58ecae43319fda71c138da802019 -Author: Ram -Date: Wed Aug 7 22:42:21 2019 +0530 - - Disable CSRestore integration tests - - Signed-off-by: Ram - -commit 7c56efd2a9c3887f54c109e4e6ca3d09b8aa2972 -Author: Dinesh Israni -Date: Tue Aug 6 17:48:29 2019 -0700 - - Don't return error from schedule controllers if policy is invalid - -commit e73bbbf46645371e9a1d77da983369644c06d9e2 -Author: Dinesh Israni -Date: Tue Aug 6 17:25:05 2019 -0700 - - Update for application backup and schedule - - - For schedule set retain policy to delete by default - - When deleting backup ignore NotFound error for backup location - -commit fa902c4fe37d48f8461269b7ad970bf54a3e6c67 -Author: Dinesh Israni -Date: Tue Aug 6 17:10:40 2019 -0700 - - Add integration tests for application backup schedule - -commit f2be98dff14d40d0847da46d2e01554d03e006a5 -Author: Dinesh Israni -Date: Mon Aug 5 16:44:31 2019 -0700 - - [Portworx] Add version checks for application backup and snap restore - -commit 66c87147d31988a21d599f272232831d63a818b2 -Author: Dinesh Israni -Date: Mon Aug 5 16:36:43 2019 -0700 - - storkctl subcommands for ApplicationBackupSchedule - -commit 755325f0c1e7e4dd3546cde3b3149cfca94ad89d -Author: Dinesh Israni -Date: Mon Aug 5 16:36:24 2019 -0700 - - Vendor update for sched-ops - -commit 19da95bd7280434afa8455f027ffdd6b6ba7c6ca -Author: Ram -Date: Fri Aug 2 16:14:39 2019 +0530 - - Validate snapshot for restore - - signed-off-by: ram - -commit 1621151c727ae7a7e13f8956ab544735ca4647fe -Author: Rohit-PX -Date: Tue Jul 30 17:17:58 2019 -0700 - - Ability to add environment variables to be added to stork deployment in integration tests. - - Signed-off-by: Rohit-PX - -commit c4177fe98e3c1dea241477fc633e052b1f2feb98 -Author: Ram -Date: Wed Jul 31 11:56:08 2019 +0530 - - Move snapshotrestore test before migration - - Signed-off-by: Ram - -commit 3590d42dd351d03c44240ab8f02094f0198b9dc6 -Author: Rohit-PX -Date: Tue Jul 23 13:46:05 2019 -0700 - - Comment tests to test out the job. - - Signed-off-by: Rohit-PX - -commit a9f90d61a8e0070da79d2f1ebff445382400958e -Author: Dinesh Israni -Date: Fri Jun 14 15:29:38 2019 -0700 - - Add field in BackupLocation to specify if backups should be synced - -commit f9424c1e5a27b843b523db3ba8bd4c93185d416a -Author: Dinesh Israni -Date: Wed Jun 12 15:06:36 2019 -0700 - - Add TriggerTimestamp to ApplicationBackup - - Useful to keep creation time when backup objects are restored to another cluster - Also setting the default namespace mappings during restore if none are provided - -commit 3d3b2fbdc1e4f1d0afcd94a5ac3e5bb33bd8c934 -Author: Dinesh Israni -Date: Thu Jun 6 13:10:59 2019 -0700 - - Add controller to sync backups - - - Scans all the backupLocation for backups - - If a backup from the backupLocation doesn't exist on a cluster create one with the - format - in the namespace. 
Scheduled backups - retain their original name since they already have a timestamp in the name - - The ReclaimPolicy it set to Retain for the synced objects so that deleting - them doesn't delete the backup from the BackupLocation - -commit 70e52e0195ff52371970ed6f5ef538b7b9bc684e -Author: Dinesh Israni -Date: Mon Jun 3 18:19:20 2019 -0700 - - Destroy all contexts before waiting in integration test cleanup - -commit ac984c2d98ec36cd66fb5bbeaa2b3697ffa109e7 -Author: Dinesh Israni -Date: Wed Jul 24 16:48:53 2019 -0700 - - Add unit tests for health monitor - -commit 4b7941828e0afce2d9996979d6a0fd6ca080f3db -Author: Dinesh Israni -Date: Wed Jul 24 16:44:37 2019 -0700 - - [Monitor] Delete pods from node if it is in any phase - - A pod could be scheduled and the storage driver on the - node could go offline after that causing the pod to be - stuck in Pending state - -commit 0cfb3632eb514bd7967e9255d52d2cf200f5193f -Author: Dinesh Israni -Date: Wed Jul 17 14:25:36 2019 -0700 - - Add API to resourcecollector package to delete objects - - This can be used when resources need to be deleted before creating them again. - Deletion needs to be done for all the objects beforehand because there could be - resources that have dependencies on other resources. For example, a PVC can't be - deleted if a pod is using it. - -commit c6cd98371924068bf34158131fc72d784a21886b -Author: Ram -Date: Mon Jul 29 12:24:03 2019 +0530 - - Vendor updates torpedo - - Signed-off-by: Ram - -commit d9d40ac13184a6dcbbe37b341561d9cd4fcd8266 -Author: Ram -Date: Fri Jul 26 23:35:27 2019 +0530 - - Use addTask instead of Schedule for creating group snap - - correct api WaitOnDriverUpOnNode api - - Signed-off-by: Ram - -commit 68963cd193e39d076030e55976871dfb34fd11ab -Author: Ram -Date: Thu Jul 25 00:16:17 2019 +0530 - - Add integration test for snapshot restore - - grouplocalsnapshot restore - - cloudsnapshot restore - - addressed review comment - - Signed-off-by: Ram - -commit 3b2fa929da49d3f1d5abc7b22815fb1ab439b47a -Author: Ram -Date: Fri Jun 28 00:40:30 2019 +0530 - - Basic integration test for snapshot in place restore - - Vendor changes for inplace restore intergration tests - - Fix namespace for inplacerestore crd - - Vendor update for sched-ops, torpedo - - Signed-off-by: Ram - -commit 70f6a019ae327ec8f9cd6b29bb7377699ee6d56e -Author: Ram -Date: Mon Jul 29 11:38:43 2019 +0530 - - Address review comments - - Signed-off-by: Ram - -commit 9645ce32ed61bd67d957a191a837f9e324fae087 -Author: Ram -Date: Fri Jul 26 01:37:44 2019 +0530 - - Add retry for voldriver_restore api - - send proper rs while ha_update - - handle reconsiler failure condition properly - - use updatePVC sets - - Signed-off-by: Ram - -commit bb657322388b8e709571842c1066760a14bbdaf0 -Author: Rohit-PX -Date: Tue Jul 9 17:15:43 2019 -0700 - - Update Gopkg.toml for dep failures. - - Signed-off-by: Rohit-PX - -commit e9cfbbb1573f0fdd244467c40ae153ce562bf207 -Author: Rohit-PX -Date: Wed Jun 12 12:09:47 2019 -0700 - - Integration test for backup/restore. Replace policy - Delete. 
- - Signed-off-by: Rohit-PX - -commit d9deefc8fdeea25008f7f61f5a1921e5cf0dc3f6 -Author: Dinesh Israni -Date: Tue Jul 16 14:41:36 2019 -0700 - - Add an integration test for application clone - -commit 65b79302f45bd7e869a06754753e016e516bab0c -Author: Dinesh Israni -Date: Tue Jul 16 14:40:29 2019 -0700 - - ApplicationClone: Create destination namespace if it doesn't exist - -commit 883352abc1f8439d05aaa52c591c43e3f9c26da8 -Author: Dinesh Israni -Date: Mon Jul 15 16:07:50 2019 -0700 - - Vendor update for sched-ops and torpedo - -commit 5b48f8e27616baa6bb5d8376ac0b6a8b1d127d06 -Author: Dinesh Israni -Date: Fri Jul 26 13:36:15 2019 -0700 - - Update permissions for stork-role - - Needed for application restore - -commit 5c904f33a5733fe62258fde9a3a25c8ed6129df9 -Author: Dinesh Israni -Date: Mon Jul 22 15:07:33 2019 -0700 - - [Portworx] Add labels to cloudsnap for application backups - -commit ebb387d43d4e429462a02b29b3d735c042d21aa3 -Author: Dinesh Israni -Date: Tue May 21 14:57:11 2019 -0700 - - Add controller for application backup schedule - -commit 987c6b555c31462f4201d04ab558fc0ec059a2d2 -Author: Dinesh Israni -Date: Mon May 20 17:08:05 2019 -0700 - - Add CRDs for ApplicationBackupSchedule - -commit bad6ba7adb6a063e4d1d952eb2c45fdfe5f9793d -Author: Dinesh Israni -Date: Thu Jul 25 13:41:10 2019 -0700 - - Fix duplicate imports - - Was causing the latest staticcheck to fail - -commit cb94241e576eab5040957b31e7848ffba218f907 -Author: Dinesh Israni -Date: Mon Jul 22 19:16:38 2019 -0700 - - Validate name and namespace when generating clusterpair - -commit 4a04e421eaf8c1f99e3620aac35985611b6da7a3 -Author: Luis Pabón -Date: Fri Jul 12 14:38:05 2019 -0700 - - Update vendor - -commit 9cce69a03693449c3b495b2a46d86be6547e8e0f -Author: Luis Pabón -Date: Mon May 13 13:59:07 2019 -0700 - - CSI Support - -commit e8a3b7847fab86de6a2d075eac7a18d6db8948b8 -Author: Dinesh Israni -Date: Wed Jul 17 18:37:04 2019 -0700 - - Add -t to test2json to add timestamps - -commit 00d9152fefbf43ec5fe7988e902a31b38dff103f -Author: Dinesh Israni -Date: Thu Jul 11 15:02:27 2019 -0700 - - Store snapshot schedule info in annotations instead of labels - - Values in labels have a smaller length limit and the info should be an - annotation anyways - - Fixes #415 - -commit cd9f704867586af2b7e0bf65b9af303536d952a8 -Author: Dinesh Israni -Date: Wed Jul 10 15:46:29 2019 -0700 - - Allow users to specify annotation in pod if only local node should be preferred - -commit 00a97d76a7c7ca4e532cb26689e7378453c3ae9b -Author: Dinesh Israni -Date: Mon Jul 1 13:00:58 2019 +0400 - - When uploading objects don't close writer async - - It can return errors which should be reported back - -commit 9181c724b6d73e719359e1b286afca0aaf54247c -Author: Ram -Date: Thu Jun 27 14:27:00 2019 +0530 - - Fix correct restore volume name in log - - add volumeId in info log - - Signed-off-by: Ram - -commit 274376b5342560c9ae1279e11e8035cbe2172e18 -Author: Ram -Date: Thu Jun 13 22:53:59 2019 +0530 - - Review comments - - handle repl update of volume - - delete orphaned objects once restore completes - - remove restoreType from CRD - - fix unique restore TaskID - - Signed-off-by: Ram - -commit 5588558a2c858366130fc13bb6947e2090668277 -Author: Ram -Date: Wed Jun 12 22:30:15 2019 +0530 - - Rearrange snapshot restore controller - - prepare restore objects before in place restore - - check status of snapshots restore objects - - keep restorevol map and pvc list with snapshot restore - CRD - - Signed-off-by: Ram - -commit 98ca860a8abade8d55ab512ed3b209ed3861fc03 -Author: Ram 
-Date: Wed Jun 12 01:14:04 2019 +0530 - - Add preparation stage for in-place snapshot restore - - Signed-off-by: Ram - -commit 89a2d7d08a7d7196f4bc3cbccd9c887a756acfb0 -Author: Dinesh Israni -Date: Thu Jun 20 10:33:19 2019 +0400 - - Don't skip delete for PV and PVCs when replacing - -commit 8587c17d87c3f9e31daeccdb279faf7f05103dd5 -Author: Harsh Desai -Date: Fri May 31 17:04:50 2019 -0700 - - [Portworx] Fail Portworx driver init if service doesn't have required ports - - Signed-off-by: Harsh Desai - -commit 28ff69936f09b5de52e308deae902c1035af11b8 -Author: Dinesh Israni -Date: Thu Jun 13 16:49:48 2019 -0700 - - Create annotations to store migration replicas if it doesn't exist - -commit 541a0f8afc1bb50d71c34053f60846e8203595a5 -Author: Dinesh Israni -Date: Thu Jun 13 14:33:28 2019 -0700 - - Ignore errors from discovery helper for aggregate APIs - -commit c4b4526c86e825fedbf50fc3b3835b90553a6d01 -Author: Dinesh Israni -Date: Thu Jun 13 16:38:37 2019 -0700 - - Add support to collect Template objects - -commit 0300b1ba34ec8a393fe0a0bb8e5ba5459da50831 -Author: Dinesh Israni -Date: Tue May 28 19:55:16 2019 -0700 - - Ignore error in storkctl if deploymentconfig type isn't found - -commit fdeaf4557546fa0222fa779a001033e1ec6706f0 -Author: Piyush Nimbalkar -Date: Wed Jun 12 16:22:06 2019 -0700 - - Remove storage cluster CRD and controller - - Signed-off-by: Piyush Nimbalkar - -commit 603f5f576fb2a6b84f0c9afd1399802c538f6cf4 -Author: Dinesh Israni -Date: Mon Jun 10 17:17:27 2019 -0700 - - storkctl subcommands for backuplocation - -commit 1d50a5c6ac67372be4a3dd7f68031cc6c78b6ae2 -Author: Dinesh Israni -Date: Mon Jun 10 17:23:39 2019 -0700 - - Vendor update for sched-ops - -commit a302011186a76ef7c17b8d7176b37370ffd69b1c -Author: Dinesh Israni -Date: Thu Jun 6 15:58:23 2019 -0700 - - storkctl subcommands for application clone - -commit 32ee7104f6da915108a7ab9ce87a1044f53f0f18 -Author: Dinesh Israni -Date: Thu Jun 6 14:31:27 2019 -0700 - - storkctl subcommands for applicationbackup and applicationrestore - -commit 4e47e815ee834d6f6c1624aa6494f7394f0c7462 -Author: Ram -Date: Mon Jun 10 23:27:14 2019 +0530 - - sched-ops vendor update - - Signed-off-by: Ram - -commit 8a907b88c09fb20c02dc4cb40f952f5b8fbefee2 -Author: Ram -Date: Tue Jun 4 15:59:28 2019 +0530 - - Storkctl support for in-place restore - - - Pretty print snapshot-restore output - - Add UT's for storkctl snapshotRestore - - Review comments - - Signed-off-by: Ram - -commit 0525e3e34e1917abefb01eeae7df412bd9ab6956 -Author: Ram -Date: Tue Jun 4 14:47:04 2019 +0530 - - Add unit test for restore check in extender_test - - adjust ut's to create pvc - - address review comments - - Signed-off-by: Ram - -commit 326292106f8a84a9e8ba237dfb0ede2392d51000 -Author: Ram -Date: Fri May 31 19:18:51 2019 +0530 - - Add stork scheduler check for in-place restore - - - address review comments - - nil check for pvcclaim - - Signed-off-by: Ram - -commit 9641642969ca2aa7bdd446680efbc1c97187dddd -Author: Ram -Date: Thu May 2 01:18:26 2019 -0700 - - Add controller for In-place snapshot restore - - Fix go-lint errors - - Review comments - - Generated files for changed volumesnapshotCRD's - - Restore snapshot where pvc is in use by pods - - Move controller specific login from portworx driver to snapshot restore - controller - - Generated files by codegen - - Review comments - - Signed-off-by: Ram - -commit 88c5d54ec2676d38c90fbfa3d8dd66ce2a451a70 -Author: Ram -Date: Thu May 2 01:16:32 2019 -0700 - - Add CRD for In-place volume snapshot restore - - codegen generated files 
- - Add controller for In-place snapshot restore - - Add support for local groupsnapshot in-place restore - - Fix go-lint errors - - Review comments - - Check restore status before calling driver's snapshot restore - - codegen generated files - - Signed-off-by: Ram - -commit ee02a051e0d487339d09d3acaf495f4971d9f1ed -Author: Dinesh Israni -Date: Thu May 30 20:44:35 2019 -0700 - - Update staticcheck for integration test - - It throws an error when giving the package name for some reason now - -commit 5c7a76edb581ea6031513e78e60817d1178a8488 -Author: Dinesh Israni -Date: Wed May 29 22:03:14 2019 -0700 - - Fail migrations if local domain is inactive - -commit ec80b4110e13e59d54e210240ed8a35b6f144027 -Author: Dinesh Israni -Date: Thu May 30 20:04:04 2019 -0700 - - Update permissions for all stork resources - -commit 988a58993dafc844f051fb7d2dd7f29632a617ad -Author: Dinesh Israni -Date: Wed May 29 22:04:11 2019 -0700 - - Update migration behavior for PVs - - - Set reclaim policy to Retain if volumes aren't being migrated - - Update PV if it already exists instead of deleting and creating - -commit 3633b8d73a47ffa3d4bc0ca84349d6a6d4a13b79 -Author: Dinesh Israni -Date: Wed May 29 22:03:34 2019 -0700 - - Portworx: Return empty clusterdomain list if not set - -commit 31f54fd09fa55e771a677a1d1eec825dfd5febbd -Author: Aditya Dani -Date: Wed May 29 17:43:05 2019 -0700 - - Portworx ClusterDomainsStatus: Do not fail on volume enumerate errors. - - - Add a new state SyncStatusUnknown when the driver fails to fetch the sync status. - -commit 12554e96b5f3ec6b4c0d0aac93726643b163e7b4 -Author: Aditya Dani -Date: Tue May 28 18:41:22 2019 -0700 - - integration test: Use remote cluster to activate/deactivate domains - -commit d1e467829e95c23c2d55fca861bd51300061159b -Author: Dinesh Israni -Date: Fri May 3 22:19:33 2019 -0700 - - Fix type for annotation to not collect resources - -commit a80f31e6c39624f529102ba608e1b32edfb6e2dd -Author: Dinesh Israni -Date: Thu May 2 23:20:59 2019 -0700 - - Controller for ApplicationClone CRD - - - ApplicationClone objects are only allowed to be created in the admin namespace - for now - - First the volumes for all the PVC are cloned - - Then the resources are copied from the source to destination namespace - - ClusterRole is not copied since it is a cluster scoped object - - ClusterRoleBinding is merged to have same binding for both namespaces - -commit 6f66e799199af79ee0dbb796475c62cf79487be9 -Author: Dinesh Israni -Date: Thu May 2 23:18:29 2019 -0700 - - Add interface in volume driver to Clone Volumes - - Also added implementation for portworx driver - -commit d8f7f2af82fb40354341887a154daf748728f762 -Author: Dinesh Israni -Date: Fri May 17 18:42:57 2019 -0700 - - Add package to encrypt/decrypt using AES - -commit 004a47fbf98625b528582f96f4320e1ba0a11f52 -Author: Dinesh Israni -Date: Fri May 17 18:41:15 2019 -0700 - - Update backup and restore controllers to use encryption key - - EncryptionKey is used up from the BackupLocation object - -commit 30b95a6a02f38d7624827b395df46acd4ed86096 -Author: Dinesh Israni -Date: Wed May 1 15:28:49 2019 -0700 - - Vendor update for new dependencies - -commit 29546a4b5dc55003d1943d70f79dea43ad61cac1 -Author: Dinesh Israni -Date: Thu May 9 14:34:34 2019 -0700 - - Print stork image with `storkctl version` command - -commit 43128581471a0f81efdc0c683477d0d68701157b -Author: Dinesh Israni -Date: Thu May 9 14:33:27 2019 -0700 - - Update stork parameters - - Don't print defaults, it is printed automatically - Set kube-system as the default admin 
namespace - -commit d4356d08d62c742e11be714f512c6d61aa7d0d89 -Author: Dinesh Israni -Date: Fri May 3 15:13:44 2019 -0700 - - Add controllers for application backup and restore - - - Backup and restore is triggered for the volume first followed by the resources - - The location of the backup is specified by a BackupLocation - - The volume driver stores the volume backup in its format - - The resources are stored under /// - - This path is stored in the backup object - - An ApplicationRestore object needs to refer to an ApplicationBackup that it - wants to restore from - -commit c32f94c9fc24dd35ffb1dd2a43f786527eea3588 -Author: Dinesh Israni -Date: Wed May 1 16:20:58 2019 -0700 - - Add interface to volume driver to backup and restore volumes - - Also added implementation for Portworx driver - -commit 0a696e689069f7a928cfe585cef65665f5c26bec -Author: Dinesh Israni -Date: Wed May 1 16:23:36 2019 -0700 - - Add package for objectstore abstraction - - The API takes a BackupLocation object and return a bucket handle - which can be used for CRUD operations - -commit 38b071f7abc0e907b7238a67aa15cf4ce1791ea1 -Author: Aditya Dani -Date: Mon May 27 17:37:59 2019 -0700 - - Update dep versions for sched-ops and torpedo packages - - Signed-off-by: Aditya Dani - -commit 5841c29b9adfecb9f876908b81e8db2e8cc82f34 -Author: Dinesh Israni -Date: Mon May 27 10:54:38 2019 -0700 - - Add suspend and resume subcommands to storkctl - -commit 8a0d552989bb585b61a9266cecc5437ad0de2d85 -Author: Aditya Dani -Date: Sun May 26 00:14:07 2019 -0700 - - vendor updates for sched-ops, torpedo and talisman - -commit 19f0f469468a2578f3c556eb655522c49612d67b -Author: Aditya Dani -Date: Mon May 27 16:01:42 2019 -0700 - - Modify storkctl to show ClusterDomain's sync status - - - Use the updated ClusterDomainsStatus CRD and fetch the SyncStatus from it - - Modify storctl UTs - -commit 3255eccc342aef3a6fd7c798720cd2004f0d2bcd -Author: Aditya Dani -Date: Sun May 26 14:51:41 2019 -0700 - - Portworx: Determine cluster domain's sync status based on volume replicas - - - Modify Portworx's GetClusterDomains API - - Enumerate all portworx volumes and based on the current and create replica set - determine if a volume is in resync. - - Determine a cluster domain's sync status based on resyncing volumes and their nodes. - - Update the ClusterDomainsStatus controller to use the modified CRD. - -commit 8338eb362300bb34e4846b976149ceea82c69150 -Author: Aditya Dani -Date: Sun May 26 00:06:17 2019 -0700 - - Modify ClusterDomainsStatus CRD - - - Remove Active and Inactive lists - - Add a ClusterDomainInfo object under Status - - Add new SyncStatus that indicates whether a cluster domain is in sync with other - cluster domains. 
- -commit c1406b3acc7eab1a49b4b756af72e14985fa4e05 -Author: Ram -Date: Mon May 27 23:26:53 2019 +0530 - - Add wait options for storkctl create migrations - - Signed-off-by: Ram - -commit 1f61136389475fcf5fe195d45934b36b46f07c27 -Author: Ram -Date: Fri May 24 22:20:37 2019 +0530 - - Add wait poll support for storkctl activate clusterdomain - - Signed-off-by: Ram - -commit fe496a5ced7a7a90349b6c5693f6be96629bf669 -Author: Dinesh Israni -Date: Fri May 24 19:41:09 2019 -0700 - - Print N/A for volumes and resources when not migrating them - -commit f2e070eac4fc7dbd7609f8445730453122b507ed -Author: Dinesh Israni -Date: Thu May 23 18:03:23 2019 -0700 - - Create some default schedule policies - - Also update strokctl to use a default policy for migration schedule - -commit 8dd0ce7ed3c5d1690b88e6fe545bc43c6ece3a16 -Author: Dinesh Israni -Date: Wed May 22 16:54:30 2019 -0700 - - Optimize migrations for clusterrole - - For clusterrole, get a list of crbs initially and use that to check if migration - is required - - Also skip deployer and builder service accounts which are automatically created - on OCP - -commit 745b5402bd3840a2c9cc1236ef014023c1cc123a -Author: Harsh Desai -Date: Tue May 21 14:42:48 2019 -0700 - - [Portworx] add idempotency for local snapshots - - Signed-off-by: Harsh Desai - -commit 6824155a70d0af3da694bd3ebef9786506fab041 -Author: Dinesh Israni -Date: Wed May 22 15:27:46 2019 -0700 - - Suspend migration schedules if the local clusterdomain is inactive - -commit 2e7d881098d6810e54572f702db58d0402b37407 -Author: Dinesh Israni -Date: Wed May 22 15:11:00 2019 -0700 - - Trigger update for cluster domain status when updating domains - -commit ee5a514e89ffd7fb9a95cb84352f2de7823ba490 -Author: Dinesh Israni -Date: Wed May 22 14:28:18 2019 -0700 - - Portworx: Populate local domain in cluster domain status - -commit 3b3b88ad83c66e3982e52d991d3988b544f50327 -Author: Dinesh Israni -Date: Wed May 22 15:01:17 2019 -0700 - - Add local domain to clusterdomain status - - Also updated storkctl to print the local domain - -commit dabd39289000cd0627c5d357e97c17e8546549d0 -Author: Dinesh Israni -Date: Tue May 14 15:32:34 2019 -0700 - - Fix schedule to trigger if next trigger is exactly at current time - - Also added UT - -commit 9ae8cbc32804087dde87c78b25b7ac67e402e843 -Author: Dinesh Israni -Date: Mon May 20 22:33:58 2019 -0700 - - Revert "Use correct spec for extender integration test" - - This reverts commit 27b8690fde2068f3cbbc29ad48c42f7d4f44e61f. - -commit 27b8690fde2068f3cbbc29ad48c42f7d4f44e61f -Author: Dinesh Israni -Date: Mon May 20 14:31:42 2019 -0700 - - Use correct spec for extender integration test - -commit 068f895d4fe93f5ef1b2160f4b247b6d88ac7f1d -Author: Dinesh Israni -Date: Thu May 16 17:50:12 2019 -0700 - - Add deploymentconfig to activate migrations for storkctl - -commit f6a79a813f095495cc5787392d5f06225797bc61 -Author: Dinesh Israni -Date: Sat Apr 20 14:57:37 2019 -0700 - - Add application manager package - - This will start the controller for all the application specific operators - -commit 35fc3c825d6a0d10f6b06793b6bbc0c936bd476d -Author: Dinesh Israni -Date: Mon Apr 15 18:20:55 2019 -0700 - - Add CRDs for BackupLocation - - Used to specify the objectstore where backups can be stored. - Location is kept generic enough so as to allow non-objectstore targets to be - specified in the future. - The config can be provided inline or through a secret. 
- - Issue #284 - -commit 64e7b605394a7891ac86ab9e2bb69568750ac4ef -Author: Dinesh Israni -Date: Mon Apr 15 15:17:47 2019 -0700 - - Add CRDs for Application Backup and Restore - - Issue #284 - -commit 96ea49d2be33a9e965d6f34a982ecb285e3d46e6 -Author: Aditya Dani -Date: Mon May 13 18:21:28 2019 -0700 - - vendor update from openstorage - - - Fix openstorage pkg/grpcserver memory leak when grpc endpoint is incorrect. - -commit dbcc8060c420dbe8dc9c94ff9731bf6a1708881b -Author: Dinesh Israni -Date: Mon May 6 14:38:25 2019 -0700 - - Add debug package to dump profiles - - SIGUSR1 can be used to dump memory and goroutine info - SIGUSR2 can be used to toggle collection of cpuprofile - -commit bbf85035f7263ab99d1244aa1b35332a9c6c3ff8 -Author: Dinesh Israni -Date: Mon May 13 18:28:46 2019 -0700 - - Fix matching of clusterdomain list - -commit c5e9722ba34b35729049d28212b333ec377660bd -Author: Aditya Dani -Date: Mon May 13 11:21:08 2019 -0700 - - Print an error log when the controller fails to fetch cluster domain info. - - Signed-off-by: Aditya Dani - -commit bf4c7b2c7d41d41a80cd1929abe563db21b37ae4 -Author: Rohit-PX -Date: Fri May 10 18:20:03 2019 -0700 - - Use standard-verbose for gotestsum output. - - Signed-off-by: Rohit-PX - -commit a97aef1ccaeee4c8d409a62faf3b7de2b4e79746 -Author: Dinesh Israni -Date: Thu May 9 19:25:28 2019 -0700 - - Update vendor dependency - -commit 2ead79c6fb794609b89520943bdad30194f0bd05 -Author: Dinesh Israni -Date: Thu May 9 19:24:46 2019 -0700 - - Add support to collect some additional resources - - - Role - - RoleBinding - - Ingress - - Also deleting loadBalancerIP from service resource - -commit 5434fb612d1075a6767a217b1e7eae4279938762 -Author: Rohit-PX -Date: Thu May 9 13:29:34 2019 -0700 - - Use gotestsum for stork test. - - Signed-off-by: Rohit-PX - -commit 2f53393411a0fd14bc1cca4acf2b3f5ffff53004 -Author: Dinesh Israni -Date: Wed Apr 24 19:45:35 2019 -0700 - - Update google cloud sdk and use python3 in container - - Also update some python libraries - -commit 4a52ec8419ceac20e133cfb9d69398a95fde8be4 -Author: Dinesh Israni -Date: Tue May 7 14:47:28 2019 -0700 - - Add field in migration spec for admin cluster pair - - This can be used to migrate cluster scoped resources if an admin doesn't want to - provide access for those to individual users. The admin cluster pair needs to be - created in the admin namespace. 
- -commit 17485bdf10df161d0448d9ad17db4c936fbb35fa -Author: Dinesh Israni -Date: Tue May 7 13:33:57 2019 -0700 - - Update node info to have storageID and schedulerID - - Also update portworx driver to populate both for the nodes - -commit be02a94a680f8a424a242f71d3bf2b1aeed5cbbb -Author: Dinesh Israni -Date: Mon May 6 14:41:17 2019 -0700 - - Vendor update for sched-ops - - Fixes a memory leak when watches are re-established - -commit e3f9d69395b7cd8d8e7566cfbb6318fbaf8f1b17 -Author: Dinesh Israni -Date: Thu May 2 15:58:49 2019 -0700 - - Add support to collect some ocp resources - - New supported resources are - - DeployemtConfig - - ImageStream - - Route - -commit 66189880a3f16478575144d3ed9c025b21d7a869 -Author: Dinesh Israni -Date: Thu May 2 15:58:15 2019 -0700 - - Vendor update for DeploymentConfig - -commit 286c9dea6e4e54752102c5397c64b0178c5e6653 -Author: Dinesh Israni -Date: Wed May 1 17:47:33 2019 -0700 - - Vendor update for sched-ops - -commit 94dff687c8a48895df3ed1c108304cf983241447 -Author: Dinesh Israni -Date: Wed May 1 16:23:10 2019 -0700 - - Update watch API in health monitor - -commit e32745c8f534a33b61185991bae37288d75c140b -Author: Dinesh Israni -Date: Mon Apr 15 20:01:10 2019 -0700 - - Move resource collection logic from migration controller - - * Can be used by other modules to get unstructured objects from a namepsace - and matching label selectors - * Added support to collect clusterrolebindings inlcuding users and groups for a - namespace - * Added merging of clusterrolebindings when applying resources - -commit 79c82f809f739aea8645fdc0f1faa7fdfc505aa2 -Author: Dinesh Israni -Date: Thu May 2 14:12:18 2019 -0700 - - Increase wait time in snapshotschedule test - - Gives it enough time to wait for the trigger and status to be updated - -commit a529afe888ac3bf6b6451fc1fe3bb73ea48784f4 -Author: Dinesh Israni -Date: Thu Apr 25 16:35:57 2019 -0700 - - Upload storkctl from master branch to master path instead of latest - -commit 43cb1fd905460b6797e68684c96754cdf3dbcf36 -Author: Tapas Sharma -Date: Wed Apr 17 13:03:52 2019 -0700 - - Define CRD for application cloning - 0. This checkin defines the first level CRD for cloning applications - 1. Added replace policy to the spec - 2. Added ApplicationClone and ApplicationCloneList to register.go - 3. Added ResourceInfo and VolumeInfo to the status of clone - 4. Removed Namespace from the volumeInfo and resourceInfo - - Signed-off-by: Tapas Sharma - -commit fd28b56985709e0effa9e297f224089ec1cdebaa -Author: Dinesh Israni -Date: Sat Apr 20 14:58:51 2019 -0700 - - Remove the test directory from google cloud sdk - -commit 4082186164c46157a19d7eb3001026d586ba3cb5 -Author: Dinesh Israni -Date: Sat Apr 13 20:28:23 2019 -0700 - - Switch gosimple to staticcheck and fix errors - - Also added static analysis checks for unit and integration tests - Added gocyclo target in Makefile but not enabled until we fix the issues - - Issue #287 - -commit 4aab57ef7aa302767244bac7cbb3254ba7300229 -Author: Dinesh Israni -Date: Fri Apr 12 19:22:30 2019 -0700 - - Add events to pod from extender in case of errors - -commit 8ed66053a3020211573ca4466a880c26b1bec257 -Author: Dinesh Israni -Date: Mon Apr 15 21:34:37 2019 -0700 - - Bump version to 2.3.0 - -commit 10a46387bce2dd3176780a339154ca9c9e58dfc6 -Author: Dinesh Israni -Date: Mon Apr 15 21:34:21 2019 -0700 - - Revert "Update travis for 2.2 branch" - - This reverts commit 392c882377a6c0efbe3fe3e56f9800460ad8f236. 
- -commit 392c882377a6c0efbe3fe3e56f9800460ad8f236 -Author: Dinesh Israni -Date: Wed Jan 30 19:23:58 2019 -0800 - - Update travis for 2.2 branch - -commit 5a0eac79d2bfeb1207a905b8690d66157b9d0d8f -Author: Aditya Dani -Date: Sun Apr 14 17:59:34 2019 -0700 - - vendor updates from sched-ops - -commit 6547b18b705e92d0e6e93d0bcffe5a3a55e87954 -Author: Aditya Dani -Date: Sun Apr 14 17:58:53 2019 -0700 - - New specs for cluster domain integration tests - -commit 2ec3df6edf65b60f5468128ee54d53d7d929510b -Author: Aditya Dani -Date: Sun Apr 14 17:59:09 2019 -0700 - - Add integration tests for ClusterDomains. - -commit 89ed743b9df156b55af9e587e7a6cb000bffc4dc -Author: Dinesh Israni -Date: Sat Apr 13 09:42:30 2019 -0700 - - Validate PVC before checking for auto created schedule - -commit 31d35f9ca641d398f6be88e91e98f611232bebe5 -Author: Aditya Dani -Date: Fri Apr 12 18:46:38 2019 -0700 - - Normalize the clusterID before using it is a kubernetes resource name for ClusterDomainsStatus - -commit 5d9037d078ebb26eb9bf9f76043637a342760138 -Author: Dinesh Israni -Date: Fri Apr 12 13:06:23 2019 -0700 - - Add integration test for snapshotschedule created using storageclass - -commit 24c788d5492bb29cdb6c7cb62ede3bc2913b0cdc -Author: Dinesh Israni -Date: Fri Apr 12 16:51:25 2019 -0700 - - Return nil when clusterpair is deleted without storage options - -commit f7d64f815e51190a576984a02952b3b9c7deecee -Author: Dinesh Israni -Date: Fri Apr 12 16:08:13 2019 -0700 - - [Portworx] Return error if starting migration fails - - Previously the status was being set as failed. Returning error - ensures that the migration will be retried and events will be raised - -commit b357a1f4c8f21107929cb763b2d74ccda8197a54 -Author: Dinesh Israni -Date: Fri Apr 12 12:33:22 2019 -0700 - - Fix name in permission for cluster domain status object - -commit 63a164d98b11b59b51c73e697acdd26f44db8309 -Author: Dinesh Israni -Date: Wed Apr 10 20:18:26 2019 -0700 - - Set default reclaim policy for snapshotschedule - - Also use time from schedule pacakge for creation time to help with integration - test - -commit de8bfc15b077ed011072c0bd0b19862c1ecf2e5a -Author: Dinesh Israni -Date: Wed Apr 10 20:17:57 2019 -0700 - - Vendor update for sched-ops - -commit 80c792294d4602ebe236ba024f753a40098fc1c5 -Author: Dinesh Israni -Date: Mon Apr 8 12:33:34 2019 -0700 - - Integration tests for snapshot schedules - -commit 0b5389ad65e1f0fc7c511380838f8ae6061348b9 -Author: Grant Griffiths -Date: Wed Apr 10 17:00:17 2019 -0700 - - [Portworx] Add IATSubtract option to auth token options - - Signed-off-by: Grant Griffiths - -commit e18b842d07343fe74bdb64b41775c7d62458b630 -Author: Grant Griffiths -Date: Wed Apr 10 16:59:34 2019 -0700 - - Openstorage vendor update - - Signed-off-by: Grant Griffiths - -commit 5b81697e06891e184c87828052175a51b1d636a0 -Author: Dinesh Israni -Date: Wed Apr 10 09:30:36 2019 -0700 - - Add permissions for clusterdomain CRDs - -commit 61ac6b416c81dbaaabac1635c8b19fb0c77baad7 -Author: Dinesh Israni -Date: Mon Apr 8 19:10:35 2019 -0700 - - [Portworx] Take full cloudsnap for weekly and monthly schedules - -commit 1681683289f2ab5705f4c71012fa3388ca92a210 -Author: Dinesh Israni -Date: Mon Apr 8 18:38:59 2019 -0700 - - [Portworx] Fix cloudsnap status check - - If status is present in VolumeSnapshotData return that, - else use the taskID to query the status instead of the volumeID - -commit c55607d93f483087c3ea4fca8e1aeadcf3ef9581 -Author: Dinesh Israni -Date: Mon Apr 8 18:38:21 2019 -0700 - - [Portworx] Add owner information to cloudsnaps - -commit 
eb8fd1b2a6981a0c95dd078fce6629c101f03c61 -Author: Dinesh Israni -Date: Mon Apr 8 18:36:38 2019 -0700 - - Add labels to scheduled snapshot descrbing the schedule - -commit 4eed9a6c422ee46da39b17f3dbf05f2b47fa9413 -Author: Dinesh Israni -Date: Mon Apr 8 18:35:54 2019 -0700 - - Vendor update for snapshot from external-storage - -commit 097d4b1f62335b96e27cbfa9957c40b2d55c9601 -Author: Aditya Dani -Date: Tue Apr 9 17:30:28 2019 -0700 - - Ignore already exists error when creating clusterdomainsstatus CRD - -commit ffeddfbfaafa8baaa0d6dc415742d708953af80d -Author: Aditya Dani -Date: Tue Apr 9 17:18:26 2019 -0700 - - Portworx Driver: Use cluster.Enumerate instead of cluster.Uuid. - -commit e3f339e07195dc706cb403b870ca8b341efe7684 -Author: Aditya Dani -Date: Mon Apr 8 13:34:54 2019 -0700 - - Portworx: Return an error when clusterID is empty - - Signed-off-by: Aditya Dani - -commit ea22ac2257f05a98eb549e75c275537ca63599b1 -Author: Dinesh Israni -Date: Wed Apr 3 22:29:15 2019 -0700 - - [Portworx] Only check version for online nodes - -commit 984d15b4c35019620355708345af68f39f93f163 -Author: Dinesh Israni -Date: Wed Apr 3 22:28:43 2019 -0700 - - [Portworx] Add parsing for cluster pair mode - -commit cd421d22267464757a07362ca4c71c0ae289dabd -Author: Dinesh Israni -Date: Wed Apr 3 22:28:16 2019 -0700 - - Vendor update for openstorage - -commit a33ab6d5f921011627b31e8d2ffee09adeb15320 -Author: Dinesh Israni -Date: Wed Mar 27 23:14:13 2019 -0700 - - [Portworx] Mark canceled migrations as Failed - -commit 0c9f5e45383072e88d09950e7f23dffd5e60a683 -Author: Luis Pabón -Date: Sun Mar 31 19:54:01 2019 -0700 - - Vet/Lint issues - -commit 11c7312c5cf55772d1ac41fca3e684587f913fae -Author: Luis Pabón -Date: Sun Mar 31 19:34:14 2019 -0700 - - ClusterDomain supports auth - -commit 19f5865bebd0745081dc6689e43a5a3ed3eed00d -Author: Luis Pabón -Date: Sun Mar 31 16:23:45 2019 -0700 - - TLS in Grpc - -commit 43cd3d7540e47af54fb1b14d179583744076079b -Author: Luis Pabón -Date: Thu Mar 28 22:32:29 2019 -0700 - - Support for OpenStorage Auth - - Based from work on #302 by Paul Theunis - - Signed-off-by: Luis Pabón - -commit 8a9f655bc752c43e0bbf79363859eab6cb746cc7 -Author: Luis Pabón -Date: Thu Mar 28 22:32:08 2019 -0700 - - Vendor updates - -commit 218aba57bed9e70fedc706ff3be05a3ded5cdb07 -Author: Aditya Dani -Date: Sat Mar 30 10:32:07 2019 -0700 - - Add constraint on torpedo to branch stork-2.2 - -commit ef0b429d6352bb8081ac566697bfb356f4151bb0 -Author: Rohit-PX -Date: Thu Mar 28 15:09:56 2019 -0700 - - Integration test - restore from local group snapshots. 
- - Signed-off-by: Rohit-PX - -commit 6775ef28c42bc64965a0de278c51bb79de3b850e -Author: Aditya Dani -Date: Fri Mar 29 17:06:43 2019 -0700 - - Added controllers for ClusterDomainsStatus and ClusterDomainUpdate CRD - - Signed-off-by: Aditya Dani - -commit 622fb0d4aaf4158a466281cdd3917c43ab9880e0 -Author: Aditya Dani -Date: Fri Mar 29 17:06:00 2019 -0700 - - storkctl changes for cluster domains - - Added the following commands for storkctl to manage clusterdomains - - - storkctl get clusterdomainsstatus - - storkctl get clusterdomainupdate - - storkctl activate clusterdomain - - storkctl activate clusterdomain --all - - storkctl deactivate clusterdomin - - Added UTs for storkctl - -commit 9e6c0f84f16032679df8cf55b56a49f8d07f0d31 -Author: Aditya Dani -Date: Fri Mar 29 17:04:47 2019 -0700 - - Add Driver APIs for ClusterDomain changes - - Added the following new Driver APIs and implemented them for Portworx - - GetClusterID - - GetClusterDomains - - ActivateClusterDomain - - DeactivateClusterDomain - - Signed-off-by: Aditya Dani - -commit 5f2c707bad9303d2e7956d856f6afddd1dec8705 -Author: Aditya Dani -Date: Fri Mar 29 17:03:10 2019 -0700 - - Update stork's vendor for ClusterDomain changes - - - openstorage -> release-6.0 - - gossip - - sched-ops - - Signed-off-by: Aditya Dani - -commit f21dae3a1eddf214ef1e9a04fee2af7bd8c33f40 -Author: Harsh Desai -Date: Sat Mar 23 18:46:01 2019 -0700 - - storkctl support for group snapshots - - Fixes #226 - - Signed-off-by: Harsh Desai - -commit e1c479e7ff2ff9d8dc3adccb40a6f532bc64c86f -Author: Aditya Dani -Date: Fri Mar 29 13:14:34 2019 -0700 - - Make ClusterDomain CRDs cluster scoped. - -commit 3df4dfb71ff5692e621d4caf6deb948373844945 -Author: Dinesh Israni -Date: Wed Mar 27 20:07:19 2019 -0700 - - Store finish timestamp for migration - - Also display elapesed time using storkctl - -commit f814202e9092bd289ecf72c072f87a0cdecbdb7b -Author: Dinesh Israni -Date: Thu Mar 28 00:03:16 2019 -0700 - - Add pod watch permission required for health monitor - -commit ddecad8f1d7585ee555e47e8107b7c8242497933 -Author: Harsh Desai -Date: Mon Mar 25 14:06:52 2019 -0700 - - sched-ops vendor - - Signed-off-by: Harsh Desai - -commit 1b8da1a3993a30ac3a1e6b1235d5e7b14fbaae18 -Author: Harsh Desai -Date: Mon Mar 25 14:06:42 2019 -0700 - - Add health monitor for unknown pods - - Signed-off-by: Harsh Desai - -commit 356d61ce53a55a3ccb28aafce9a42c60e93e4846 -Author: Dinesh Israni -Date: Tue Mar 26 21:18:33 2019 -0700 - - Fix bumping time in migration schedule test - - Adding 31 days for the monthly schedule test can cause the schedule to be - skipped for months with 30 days since the date of the month will be one ahead - -commit d66c40c2e2eb0d1e19cea741f215fc52630cd706 -Author: Dinesh Israni -Date: Tue Mar 26 18:04:24 2019 -0700 - - Add support to migrate additional resources - - Following resources will also be migrated now: - - DaemonSets - - ServiceAccounts (except default) - - ClusterRoles (if used in the namespace) - - ClusterRoleBindings (if used in the namespace) - -commit 9622679af806e7a74150b786ebd72717dac5367b -Author: Dinesh Israni -Date: Tue Mar 26 18:04:02 2019 -0700 - - Vendor update for sched-ops - -commit 946e005209ae0183ea303d71069d121b4c899813 -Author: Dinesh Israni -Date: Mon Mar 25 23:34:12 2019 -0700 - - Fixes for migration schedule tests - - - Sleep before checking for error message for invalid schedule. There could be - an error because the policy was not found since they are applied in a different - order. 
The next reconciliation will be after a minute - - Rollover to the next month/year when finding the right day - -commit af7c178bb47e4e0582daa83932c196fd27cbd16f -Author: Dinesh Israni -Date: Mon Mar 25 15:32:30 2019 -0700 - - Add annotation in PVC after creating snapshot schedule through SC - - This prevents the schedules from getting re-created if a user manually deletes - them - -commit ed8ce5f20b2f77f4b0c1c34ebd4a1a53b0f25c87 -Author: Dinesh Israni -Date: Fri Mar 22 20:08:01 2019 -0700 - - Add validation for interval policy - -commit 1cf300da36884670a4cb7d9699cc4344a966a888 -Author: Dinesh Israni -Date: Thu Mar 21 22:59:25 2019 -0700 - - PVCWatcher: Ignore error if storageclass is not found during update - -commit 6d1c5bcc551af0ad1647e0c001c1800f738ac76c -Author: Dinesh Israni -Date: Thu Mar 21 22:52:16 2019 -0700 - - Add missing permissions for volumesnapshot schedules - -commit 7bcf3b527ed4e102b785fa25b2993cb4e0c98e08 -Author: Aditya Dani -Date: Fri Mar 22 12:08:42 2019 -0700 - - Allow configuring portworx service in stork - - - Get the service name and namespace from env variables. - - Find the ports which PX uses from kubernetes service. - - Signed-off-by: Aditya Dani - -commit 04ed0cea2414e3e6ad56ce0dfb19a60d2dbfb000 -Author: Rohit-PX -Date: Thu Mar 21 18:23:11 2019 -0700 - - Add node start and create separate storageclass for pvcownership test - - Signed-off-by: Rohit-PX - -commit da61b7e36da05cbf7e64fd2de6633b999f351587 -Author: Dinesh Israni -Date: Thu Mar 21 00:21:33 2019 -0700 - - Start snapshot controller first - -commit dfafbe4b7f63f500fc2231a21f67c4a832e6c821 -Author: Dinesh Israni -Date: Wed Mar 20 23:59:52 2019 -0700 - - Update vendor for torpedo - -commit b48d80f423f817d0bd7fd7fb98eb8ae0306aa956 -Author: Rohit-PX -Date: Tue Mar 19 12:00:19 2019 -0700 - - New test for verify pvc ownership fix. 
- - Signed-off-by: Rohit-PX - -commit e75eb59e8b775f3b3e4c33e9797037b07b05d072 -Author: Dinesh Israni -Date: Wed Mar 20 12:58:55 2019 -0700 - - storkctl: Add all-namespaces param for subcommands - - migrationschedule and snapshotschedule were missing the param - -commit c08231487e32d8c8f78c7ab233e431c264d735ed -Author: Aditya Dani -Date: Wed Mar 20 15:22:12 2019 -0700 - - Generated code for ClusterDomains CRD - -commit 6df7f550e09a7d954ed818609b3b42ff1d2ddbf7 -Author: Aditya Dani -Date: Wed Mar 20 15:20:14 2019 -0700 - - Add CRDs for cluster domains - - - ClusterDomainsStatus - - ClusterDomainUpdate - -commit b79b7265ff5293dd2ad26018340b0ed5a622115e -Author: Harsh Desai -Date: Fri Feb 22 18:43:45 2019 -0800 - - sched-ops and torpedo changes - - Signed-off-by: Harsh Desai - -commit c7a30a22f354fac788efcfd9e0539e003e637d82 -Author: Harsh Desai -Date: Thu Feb 21 17:49:02 2019 -0800 - - Integration tests for migration schedules - - Signed-off-by: Harsh Desai - -commit 4543fbfff2420a643820135965b4b0c843be5b9b -Author: Dinesh Israni -Date: Wed Mar 13 14:18:45 2019 -0700 - - Add controller to watch for changes on PVCs - - Right now it creates snapshot schedules if specified in the - storageclass for a PVC - -commit 47c3eb8229d379c23ae11a825466d039b0ea07a3 -Author: Dinesh Israni -Date: Wed Mar 13 14:17:30 2019 -0700 - - Add storkctl subcommands for snapshotschedule - -commit 3dfe119e2f26017c3ad3053b868f03360cbee54b -Author: Dinesh Israni -Date: Wed Mar 13 14:15:16 2019 -0700 - - Add controller for snapshot schedule - - * Similar to migration schedule, uses retain to decide how many snapshots to - keep - * Enable parameter can be used enable/disable a schedule - * ReclaimPolicy determines what happens to the snapshots triggered by the - schedule when the schedule is deleted. Setting to Delete will automatically - delete the snapshots when the corresponding PVC is deleted. Setting to Retain - (default) will not delete the snapshots. - * Moved the snapshot controllers under one path - - Issue #72 - -commit 5bdd1ec5fe7f6236eb0e4fd1ef5e2dfb46a5a15b -Author: Dinesh Israni -Date: Wed Mar 13 14:11:36 2019 -0700 - - Vendor update for sched-ops - -commit 0d77e6352a29efa2bd43b77b34258a85d4d13b17 -Author: Dinesh Israni -Date: Wed Mar 13 14:11:08 2019 -0700 - - Add CRD for volume snapshot schedules - - Issue #72 - -commit 40ca113d184edf8f44f8b3fcfd900dee8e54caa4 -Author: Dinesh Israni -Date: Tue Mar 19 16:52:33 2019 -0700 - - Add suspend flag to migrationschedule spec - - It is disabled by default - Also update storkctl for the flag - -commit f521df476cdb00a3107647102201465ec6ab8ddd -Author: Dinesh Israni -Date: Tue Mar 19 16:08:52 2019 -0700 - - Extender: Return error if no replicas are online for a volume - -commit 55fc1bfff71c7bd3705593c34a20743dc21c7a56 -Author: Dinesh Israni -Date: Thu Mar 14 17:47:10 2019 -0700 - - [Portworx] Don't rely on storage class to determine ownersip of PVC - - For statically created PVCs there might not be a storage class or annotations - with the provisioner. 
Looks at the volume source in that case to figure out - the owner - -commit 9a577f63072d4e7c374adc82728181649bd9e832 -Author: Dinesh Israni -Date: Wed Feb 27 23:53:47 2019 -0800 - - Add integration tests for migrating with label selectors - - Issue #271 - -commit c24cad3c69fd155763e4d9a18105b357bd71425f -Author: Dinesh Israni -Date: Wed Feb 27 18:36:17 2019 -0800 - - Add support to use labels to select resources during migration - - Issue #271 - -commit 2cd05389885a9fcdf8dabc480689eebff4ef086c -Author: Dinesh Israni -Date: Tue Mar 12 14:53:43 2019 -0700 - - Update bool in migration specs to pointers so that we can set defaults - -commit 481eeb5564638fa4e62098c9bc75e2d47c23d2ab -Author: Dinesh Israni -Date: Mon Feb 11 15:47:40 2019 -0800 - - Added UTs for activate/deactivate subcommands - -commit 6853c3582c43212fb038f7003cb229482c27478d -Author: Dinesh Israni -Date: Mon Feb 11 13:41:14 2019 -0800 - - Portworx: Return basic volume info in GetPodVolumes() even if Inspect fails - -commit f701e8e55be7cf4ad4c0ff945f1f619057533af7 -Author: Dinesh Israni -Date: Mon Feb 11 13:39:35 2019 -0800 - - Add subcommands to activate and deactivate migrated applications - - The commands look at the deployments and statefulsets in the given namespaces - and update the replica count if they have an annotation specifying that the - application was migrated using stork - -commit a21a6f2af26064b5d39e5a7bd6a46f40b13bfc5c -Author: Dinesh Israni -Date: Mon Feb 4 14:52:04 2019 -0800 - - Add option to skip pairing of storage in ClusterPair - - Also added option in migration spec to skip migrating volumes - This is helpful in cases where the same storage is available from multiple - Kubernetes clusters - -commit cb95e0e650891e2b683896df8951995ec4d755a3 -Author: Dinesh Israni -Date: Thu Feb 28 16:49:43 2019 -0800 - - Recreate service during migration for conflicting node port - -commit 5bb9a6892dfd4ba3165b00c5005492e9013bf89f -Author: Dinesh Israni -Date: Thu Feb 28 16:49:25 2019 -0800 - - Update vendor dependencies - -commit 940f663d48880cfc6c0bfa28564bec9e19247921 -Author: Dinesh Israni -Date: Tue Feb 12 17:38:34 2019 -0800 - - Added subcommands to storkctl for SchedulePolicy and MigrationSchedule - - - Added create, get and delete for MigrationSchedule - - Added get for SchedulePolicy - - Added UTs for both resources - -commit 248c6992c5bb116e961ce5b16e192cdf4faa9455 -Author: Dinesh Israni -Date: Tue Feb 12 17:35:00 2019 -0800 - - Add a controller for migration schedules - - - The MigrationSchedule object takes the same parameters as Migration Spec - - It also takes in a SchedulePolicyName - - The reconciler checks every minute if any new migrations need to be triggered - - The status for each migration is stored in the object. Only one successful - status is stored - - Only one migration can be triggered at a time - - The schedule package is used to check is a migration should be triggered for - each type of policy. 
- - Includes UTs for the schedule package to make sure triggers will fire - correctly - -commit e24ce0105d93c9d9b3baa1790f45a81c0b00ba55 -Author: Dinesh Israni -Date: Wed Feb 13 16:51:20 2019 -0800 - - Update MigrationSchedule CRD to add Migration under Template.Spec - - This is similar to how deployments, statefulsets, etc have pod spec defined - -commit d0f8a22a1e4070ba70c132e6e4535d1a2152d19b -Author: Harsh Desai -Date: Tue Feb 19 17:26:52 2019 -0800 - - Add support for configurable retry count - - Signed-off-by: Harsh Desai - -commit b6873b3f8542edf76699a067e9f3bbd66f7582e2 -Author: Harsh Desai -Date: Wed Feb 20 12:02:47 2019 -0800 - - add pull request template - - Signed-off-by: Harsh Desai - -commit 9efcc832d41163c46a09d57fcef03f4740e017a8 -Author: Harsh Desai -Date: Mon Feb 18 09:45:54 2019 -0800 - - Allow users to update restore namespaces - - Signed-off-by: Harsh Desai - -commit f2bb4f6a1f57700dee981510e35a36af6941a766 -Author: Joost Buskermolen -Date: Thu Feb 21 11:04:32 2019 +0100 - - Fixed a typo - -commit a21adfd82e8a01dba4a5ba2a96921de4bbe293b0 -Author: Harsh Desai -Date: Mon Feb 18 14:58:47 2019 -0800 - - Don't run fio load specs for scale test - - Signed-off-by: Harsh Desai - -commit 9386f9a13e8a77c3a27c233132b54f77d6c53213 -Author: Harsh Desai -Date: Sat Feb 16 16:02:48 2019 -0800 - - update sched-ops vendor to PR branch - - Signed-off-by: Harsh Desai - -commit 01638e4524a756e7fce0d365e044463305b9e824 -Author: Harsh Desai -Date: Sat Feb 16 15:17:00 2019 -0800 - - Add test to load volumes while group cloudsnap is being done - - Signed-off-by: Harsh Desai - -commit 00e2c0d0f8dfff8f471fc56770c8108db57a88e3 -Author: Harsh Desai -Date: Fri Feb 15 08:12:30 2019 -0800 - - For groupsnap scale test, first create all and then verify - - Signed-off-by: Harsh Desai - -commit d4c34c6e2ec37143307e4b53acb7db13e8c5e4fa -Author: Harsh Desai -Date: Fri Feb 15 11:47:13 2019 -0800 - - Use version pkg to compare resource versions - - Signed-off-by: Harsh Desai - -commit 2338d1d42bf531859a85c3c73b3c17d68c4ae5c1 -Author: Harsh Desai -Date: Fri Feb 15 09:02:51 2019 -0800 - - Return status for failed cloudsnapshots - - - is any cloudsnap has failed, the get status API should not fail. 
Rather it should - return the failed tasks in the response so controller can log an event and then - reset and retry the group snapshot - - Signed-off-by: Harsh Desai - -commit 2747b0908d60d4fa8c07992c74e36a4bce7deac5 -Author: Dinesh Israni -Date: Thu Feb 14 21:53:06 2019 -0800 - - Don't override pod variable when running command in the pod - -commit 6b36e9a4558b68e5f07ebfad017d7aa55d48db0b -Author: Dinesh Israni -Date: Thu Feb 14 17:30:12 2019 -0800 - - Vendor update for sched-ops - -commit 25e3f7ae33e60befbdf98af45fe1c91872ea676a -Author: Dinesh Israni -Date: Wed Feb 13 18:58:09 2019 -0800 - - Add gofmt to Makefile and fix errors that were found - -commit fcee7aeeff2e21cdb1463e31e11019a6a8c90f47 -Author: Dinesh Israni -Date: Wed Feb 13 15:10:31 2019 -0800 - - Add --all-namespaces parameter to storkctl for get subcommands - - Also prints the namespace if the parameter is specified similar to kubectl - -commit 0e95b0854fd79a33609268acce27cc407bcf1eff -Author: Dinesh Israni -Date: Wed Feb 13 15:09:59 2019 -0800 - - Govendor update for sched-ops - -commit ecb1590558e592f87581c56edf484e33eda54940 -Author: Harsh Desai -Date: Wed Feb 13 11:51:14 2019 -0800 - - propagate group snap annotations to child volumesnapshots - - Signed-off-by: Harsh Desai - -commit f1bbe522f8fa020a7dc21900cfc267b62702277e -Author: Dinesh Israni -Date: Wed Feb 13 15:58:13 2019 -0800 - - Copy storkctl for all architectures into the container - -commit 27f86f64a9369d1faec44cf494b1eac048ab199a -Author: Dinesh Israni -Date: Tue Feb 12 15:17:37 2019 -0800 - - Add CRDs for SchedulePolicy and MigrationSchedule - -commit f2d62e6ad10cafb1d0b91c27994259048ecd5917 -Author: Dinesh Israni -Date: Tue Feb 12 15:16:52 2019 -0800 - - Split up CRDs into different files - -commit 90214bd5013f7f435d969ada68d6d33e67d5418c -Author: Harsh Desai -Date: Tue Feb 12 14:58:55 2019 -0800 - - Fix ready and pending snapshot conditions - - Signed-off-by: Harsh Desai - -commit b358d67e16cf592ae2d212e462659f72dc1e6352 -Author: Dinesh Israni -Date: Mon Feb 4 14:53:58 2019 -0800 - - Also set the namespace annotations from source cluster during migration - -commit d4fa569978ad4b5da75ef29dc1cf6459bf1b215f -Author: Dinesh Israni -Date: Fri Feb 1 15:07:21 2019 -0800 - - Portworx: Set snapshot type for group localsnap - - Also assume local snap if type isn't set - -commit 5196a74117e40b1fee20f922764f74bbae5efbea -Author: Dinesh Israni -Date: Wed Jan 30 19:51:16 2019 -0800 - - Reset kubeconfig in case of failure in migration test - -commit a3a4eaed6a8a983efcec4ac348292225366247f6 -Author: Dinesh Israni -Date: Wed Jan 30 19:19:15 2019 -0800 - - Update version to 2.2.0 - -commit fc42c75aeeb608368a2e84dc297a9bbc53e7f962 -Author: Dinesh Israni -Date: Wed Jan 30 19:03:24 2019 -0800 - - Fix storage class in cassandra integration test - -commit e0c32e2ffb6774c360bde4c86abba66171a8bca7 -Author: Piyush Nimbalkar -Date: Thu Dec 20 15:48:23 2018 -0800 - - Add a StorageCluster CRD to manage cluster - - - Added a controller which does nothing as of now except for watching over StorageCluster object - - The controller starts by default with stork, can be disabled using params - - Placement spec and node status in storage cluster CRD - - Using pointers for some fields so we can distinguish between an empty - value and default value - - Disabling cluster controller by default - -commit 16554cc4bb96be5f7076f5aa381ef40948b31de7 -Author: Dinesh Israni -Date: Fri Jan 25 13:38:34 2019 -0800 - - Add options to storkctl for migration pre/post exec rule - -commit 
ad6c521b2c3b4ddd6d9a2b2ab9bc3245d5a15755 -Author: Dinesh Israni -Date: Tue Jan 22 19:21:08 2019 -0800 - - Add integration tests for migration pre/post exec rules - -commit 612c8e77375a7cca8120be5c7b03cc2e327d80e3 -Author: Dinesh Israni -Date: Fri Jan 18 21:00:50 2019 -0800 - - Add Pre/Post Exec rules for migration - -commit 183a546a48bfd6df2a903afb90bbff60f29188ac -Author: Dinesh Israni -Date: Tue Jan 22 19:18:31 2019 -0800 - - Vendor update - -commit ee251e39c92022714516c76f3bf296fd5fa83e3b -Author: Dinesh Israni -Date: Tue Jan 29 18:51:51 2019 -0800 - - Update migration spec in integration test to point to correct namespace - -commit 9f690e7c9f9a4d980924fbeac9ad0a577bc5ea51 -Author: Dinesh Israni -Date: Tue Jan 29 17:14:12 2019 -0800 - - Portworx: Use volumeName for cloudsnaps during inspect after restore - -commit 997b84355986b7d52f9658550dda4ede2fd11ed8 -Author: Dinesh Israni -Date: Tue Jan 29 12:59:40 2019 -0800 - - Updated vendor for torpedo and sched-ops - -commit 1be1eb98499bd52c4bcc97e4b7520d32d50e6786 -Author: Harsh Desai -Date: Thu Jan 17 15:59:30 2019 -0800 - - Add integration tests for group local and cloud snaps - - Signed-off-by: Harsh Desai - -commit a9102584511b7742fa582b3e391a5387d89709e4 -Author: Dinesh Israni -Date: Tue Jan 29 14:02:51 2019 -0800 - - Portworx: Use volumeID instead of volume name after creating snapshot - -commit 4042abb54093f253b427b94987732a760796385c -Author: Dinesh Israni -Date: Tue Jan 15 18:11:16 2019 -0800 - - Don't delete clusterIP when migrating headless service - -commit 99de5e62d80bd530993ea99ee14305e23ef5f0cd -Author: Dinesh Israni -Date: Tue Jan 15 18:09:48 2019 -0800 - - Rename groupvolumesnapshot rule fields to PreExec/PostExec - - Same generic names will be used in migration to be consistent - -commit 3ede620bf41c6539417fb40487e115c87831ce4b -Author: Dinesh Israni -Date: Fri Jan 18 16:03:11 2019 -0800 - - Create directory for clusterpair spec in integration test - -commit 630d1f6b86b9bdf8fc10e01293bc82688b658efd -Author: Dinesh Israni -Date: Mon Jan 14 13:39:34 2019 -0800 - - Update Rule CRD - - Renamed spec to rules. Spec should only be used for objects that - have to be reconciled (ie have a status) - - Also updated the examples. 
- -commit a34edf5799cfa146e72f62a8a18d3b0a0019eaac -Author: Harsh Desai -Date: Fri Jan 18 12:17:15 2019 -0800 - - check for nil annotations on rule - - Signed-off-by: Harsh Desai - -commit ea08905b606590a62476855c0e76ab37358f4db2 -Author: Harsh Desai -Date: Thu Jan 17 09:34:11 2019 -0800 - - minimum resource version tracking needs to be per group snapshot - - Signed-off-by: Harsh Desai - -commit 17dc4dee9f0945c0f4f3f55832c7859ab88ac7d9 -Author: Harsh Desai -Date: Wed Jan 16 15:45:01 2019 -0800 - - review comments - - - don't use group snapshot from caller on errors - - raise event for pod not found - - use map for duplicates - - Signed-off-by: Harsh Desai - -commit c6b1d371c602baf9b01e2aea2c78771521b9cfc2 -Author: Harsh Desai -Date: Tue Jan 15 12:22:18 2019 -0800 - - vendoring changes - - Signed-off-by: Harsh Desai - -commit 35711364ebc52cb8728cc24e92683c9ddeb6fdff -Author: Harsh Desai -Date: Wed Jan 16 13:26:56 2019 -0800 - - Fixes for rule recovery - - - Pods running rule commands were not being tracked in snapshot annotations - - Rule recovery was not getting invoked for group snapshots - - Existing pods in tracker were getting overridden by new pods - - Signed-off-by: Harsh Desai - -commit 5253d5052949d96b3bc5712cf6bcd6264cdbc037 -Author: Harsh Desai -Date: Wed Jan 16 14:06:50 2019 -0800 - - validate pre and post checks in initial stage - - Signed-off-by: Harsh Desai - -commit 3ae7cc09050ff59e4bd121dc96a1b83d640f8364 -Author: Dinesh Israni -Date: Mon Jan 14 13:53:11 2019 -0800 - - Update README.md - -commit 2d46472b0799caf7c0c761da0eb1edce9a335fb8 -Author: Harsh Desai -Date: Mon Jan 7 10:59:07 2019 -0800 - - vendoring changes for group snapshots - - Signed-off-by: Harsh Desai - -commit 706dd3e56f6af0425ef1cf57576fef1d6454a352 -Author: Harsh Desai -Date: Fri Dec 21 16:15:15 2018 -0800 - - Add controller for groupvolumesnapshot - - Signed-off-by: Harsh Desai - - Address review comments - - - process cloudsnap failed tasks - - revert local and cloudsnaps as soon as the first failure is observed - - allow deletes of legacy group snapshots - - group snapshot controller part of snapshot controller - - fix cassandra restore pvcs - - fix v1 imports - - fix duplicate events - - revert active cloudsnapshots too - - use groupsnap logger - - move specs to examples - - use explicit variables to track done and active IDs - - Signed-off-by: Harsh Desai - -commit d70a15dba2b384e421e7e44c82218dbf3c88291c -Author: Harsh Desai -Date: Thu Dec 20 16:01:41 2018 -0800 - - Add group volume snapshot CRD - - Signed-off-by: Harsh Desai - -commit af099b96ee61bce5d0eb2ef23dfffdb46700f31f -Author: Harsh Desai -Date: Mon Jan 7 11:38:31 2019 -0800 - - fix new gosimple checks - - - pkg/initializer/initializer.go:140:9: assigning the result of this type assertion to a variable (switch obj := obj.(type)) could eliminate the following type assertions: - - Signed-off-by: Harsh Desai - -commit 5ae0e1ffdc04af5059cfd80eb6239af3dbf52e50 -Author: Dinesh Israni -Date: Tue Dec 18 00:28:48 2018 -0800 - - Update alpine packages and add ca-certs during docker build - -commit b8bce235e77fca8d34e736879d5f82d64485e992 -Author: Dinesh Israni -Date: Mon Dec 17 19:00:27 2018 -0800 - - Update master version to 2.1 for next release - -commit beb8947e4a00d6b05a870798c0d07537087cf763 -Author: Dinesh Israni -Date: Mon Dec 10 07:27:43 2018 -0800 - - Fix version regex in Portworx driver - - Issue #216 - -commit c440015ec7e2fcd39d6f10af83f4e9fe1a5939dd -Author: Dinesh Israni -Date: Mon Dec 3 17:10:47 2018 -0800 - - Set theme jekyll-theme-cayman 
-commit eb6edc454644c13869d95628cae85641cb3783c9
-Author: Dinesh Israni
-Date: Mon Dec 3 11:59:05 2018 -0800
-
-    [Portworx] Also check for CSI provisioner name for ownership
-
-commit c6eebc91ca8828327fc96228d8a207f36fc89270
-Author: Dinesh Israni
-Date: Fri Nov 30 15:55:58 2018 -0800
-
-    Bump version to 2.0.0 (#212)
-
-commit 612f0d327810e739476388386e0656ee723bae82
-Author: ram-infrac <35250819+ram-infrac@users.noreply.github.com>
-Date: Fri Nov 30 22:45:09 2018 +0530
-
-    Create mysql app before scheduling clusterpair (#211)
-
-    Signed-off-by: Ram Suradkar
-
-commit eb687e3c4ccb632f8875be841a3e6f0d46c94ce5
-Author: ram-infrac <35250819+ram-infrac@users.noreply.github.com>
-Date: Fri Nov 30 12:45:57 2018 +0530
-
-    Add namespace to clusterpair and migration specs (#210)
-
-    * Add namespace to clusterpair spec
-
-    Signed-off-by: Ram Suradkar
-
-    * Make migration and cluster pair in same specs
-
-    Signed-off-by: Ram Suradkar
-
-    * Add Name to clusterpair
-
-    Signed-off-by: Ram Suradkar
-
-commit 1585dc5f02a75c3883b899f06353d635ae2c657f
-Author: Dinesh Israni
-Date: Thu Nov 29 19:16:34 2018 -0800
-
-    Add name and namespace when generating clusterpair
-
-commit 1233c99cfec41a9741100729e115360291fee6c6
-Author: Dinesh Israni
-Date: Thu Nov 29 17:50:07 2018 -0800
-
-    Print stork version during startup
-
-commit 9ed43d91b03bfb4aa2e3bfe3c56a78e7225926c1
-Author: Dinesh Israni
-Date: Wed Nov 28 19:22:38 2018 -0800
-
-    Allow configuring an admin namespace that can migrate all other namespaces
-
-commit 449b5c6117ad9f54e976d1fded6fab3ff8b8ba74
-Author: Dinesh Israni
-Date: Wed Nov 28 19:14:06 2018 -0800
-
-    Add rule CRD register which was removed in code refactor
-
-commit 3609b8cc62a4e4e598804525bb22237b2920564d
-Author: Dinesh Israni
-Date: Wed Nov 28 20:17:28 2018 -0800
-
-    Add UTs for all the log APIs
-
-commit beb68d5ac109f245532678978763457617a11403
-Author: Dinesh Israni
-Date: Wed Nov 28 19:04:29 2018 -0800
-
-    Update storkctl UTs to use base command
-
-commit a0866c5ab2006db8bf2d05f2255890199cdc5654
-Author: Dinesh Israni
-Date: Wed Nov 28 00:39:40 2018 -0800
-
-    UTs for storkctl migration subcommand
-
-commit 9a57de713aff11c1bc88d58764af575a2b80a001
-Author: Dinesh Israni
-Date: Tue Nov 27 09:33:42 2018 -0800
-
-    Add some more UTs for storkctl clusterpair commands
-
-commit e2ee65f042ee337b61fddcb14ac2db3e80953682
-Author: Dinesh Israni
-Date: Tue Nov 27 09:33:26 2018 -0800
-
-    Update dependencies for k8s client-go and sched-ops
-
-commit c191e60c8afb0a2b8ac16f528c773d6a13de1adf
-Author: Dinesh Israni
-Date: Wed Nov 21 14:03:39 2018 -0800
-
-    [strokctl] Add return after checkErr and use namespace when creating migration
-
-commit a020787024f043f4de0fc91603c93ba766e7371b
-Author: Dinesh Israni
-Date: Wed Nov 21 14:04:46 2018 -0800
-
-    Add some more UTs for storkctl
-
-commit 91bfcd1f8ae0120faaf86f177ed7bc2ecd5696a6
-Author: Dinesh Israni
-Date: Tue Nov 20 21:42:34 2018 -0800
-
-    Limit migration to namespace of migration object
-
-commit 9fa83ef7367c96ec9fcf95212fcb36e647a2259b
-Author: Dinesh Israni
-Date: Tue Nov 20 19:36:49 2018 -0800
-
-    Vendor update for sched-ops and torpedo
-
-commit ced3376a21109a8206189999bb97e98844d72bfa
-Author: Dinesh Israni
-Date: Tue Nov 20 17:18:18 2018 -0800
-
-    Change clusterpair and migration to be namespaced
-
-commit 118edf7180827650eff50cff6644286bc77d418f
-Author: Dinesh Israni
-Date: Tue Nov 20 21:56:17 2018 -0800
-
-    [Portworx] Add eta information to migration info
-
-commit 42f16dd296c9f84276230b29f17da2981de80594
-Author: Dinesh Israni
-Date: Tue Nov 20 21:56:00 2018 -0800
-
-    Vendor update for openstorage
-
-commit 4ad2fd1a21b4128620f0db093c478f4dba2a5667
-Author: Dinesh Israni
-Date: Tue Nov 27 09:38:11 2018 -0800
-
-    When logging pod info check for owner pointer before dereferencing
-
-    Also added UT for pod log
-
-commit 1f416c77c53d18bb3f0ef678d199adfcd1cfd086
-Author: Dinesh Israni
-Date: Tue Nov 13 14:04:12 2018 -0800
-
-    Fix incorrect status update after resource migration
-
-commit bf5b9227af48cd65463d196cbc6d1de98bacdbcc
-Author: ram-infrac <35250819+ram-infrac@users.noreply.github.com>
-Date: Tue Nov 20 14:26:48 2018 +0530
-
-    Add liveliness and readiness probe to mysql-1-pvc spec (#200)
-
-    Signed-off-by: Ram Suradkar
-
-commit 78af18cfb5f2d5f737f425c4904b8327d36e2c61
-Author: Dinesh Israni
-Date: Tue Nov 13 20:31:39 2018 -0800
-
-    [Portworx] Implement cancellation of migration
-
-commit a166c619ec5368d1a11e5dc14ebb97709d03cfad
-Author: ram-infrac <35250819+ram-infrac@users.noreply.github.com>
-Date: Sun Nov 18 17:26:04 2018 +0530
-
-    Add Basic sanity cloud migration integration tests (#176)
-
-    * Govendor update for torpedo
-
-    Signed-off-by: Ram Suradkar
-
-    * Add Sanity Integration test for Cloud Migration
-
-    - Read configMap and dump cluster kubeconfig info
-    - Get remote cluster Info
-    - create clusterpair spec file
-    - apply migration spec file
-    - wait for application running on remote cluster
-
-    Signed-off-by: Ram Suradkar
-
-    * Add createCRDObjects() under schedule interface
-
-    -- added rescan API for specDir
-
-    Signed-off-by: Ram Suradkar
-
-    * Changed test to accomodate Schedule() call
-
-    - move migrs directory under specs/
-    - remove createCRDObjects() call
-
-    Signed-off-by: Ram Suradkar
-
-    * Change cluster pair constant to match options section
-
-    - remove unnecessary log
-
-    Signed-off-by: Ram Suradkar
-
-    * Remove clusterpair parsing from stork-test
-
-    - use storkctl to generate clusterpair
-    - move cloud_migration to migration test
-    Signed-off-by: Ram Suradkar
-
-    * Vendor update for torpedo
-
-    Signed-off-by: Ram Suradkar
-
-    * Review Changes
-
-    Signed-off-by: Ram Suradkar
-
-    * Vendor updates for Torpedo
-
-    Signed-off-by: Ram Suradkar
-
-    * vendor updates
-
-    Signed-off-by: Ram Suradkar
-
-    * Set k8s_ops to default after storkctl generate
-
-    Signed-off-by: Ram Suradkar
-
-    * Added generating configmap from remote kubeconfig in test-deply script
-
-    - added review comments
-    Signed-off-by: Ram Suradkar
-
-commit 23f54c23ed52f32654004763171819e594787f4e
-Author: Craig Rodrigues
-Date: Thu Nov 15 18:07:09 2018 -0800
-
-    Add tests for pvc
-
-commit fdd9c8eaeeed0d260dd5d3bfc41cfd02034a8711
-Author: Craig Rodrigues
-Date: Tue Nov 13 00:33:57 2018 -0800
-
-    Add tests for cluster pair and migration.
-
-    - Rename testSnapshotsCommon to testCommon for use in clusterPair tests
-    Refactor testCommon() for use in ClusterPair tests
-
-    Comment out tests which need more work
-
-commit 3d8747d16e6df13d87a194723e2cc779b72aa44a
-Author: Craig Rodrigues
-Date: Tue Nov 13 17:33:08 2018 -0800
-
-    Rearrange use of util.CheckErr() to fix unit test in failure scenario
-
-commit 42935e10a86c53c904e47d67576c32eae4761c6c
-Author: Craig Rodrigues
-Date: Tue Nov 13 17:32:02 2018 -0800
-
-    Check for nil config.Contexts[currentContext]
-
-commit b61f4265e3ead39f98fc8002575bfc4df299825c
-Author: Dinesh Israni
-Date: Tue Nov 13 14:27:27 2018 -0800
-
-    Update generated code for CRDs
-
-commit 3ebdcfde95b76d914dd5f50854f4ec284161be22
-Author: Dinesh Israni
-Date: Tue Nov 13 14:26:02 2018 -0800
-
-    Update code-generator dependecy to kubernetes-1.11.0
-
-commit 012ef5d832efb11c43ad541150afff8a1511b5e2
-Author: Dinesh Israni
-Date: Tue Nov 13 13:11:03 2018 -0800
-
-    Remove clusterIP from service before migrating
-
-commit 67bf9e1dc87f1e2a43b3f88ec61049e78e8c810f
-Author: Craig Rodrigues
-Date: Mon Nov 12 15:58:33 2018 -0800
-
-    Add tests for creating and deleting snapshots
-
-commit 4e184ab082c64aee7daca2bac65ce8e74732d091
-Author: Craig Rodrigues
-Date: Mon Nov 12 19:07:40 2018 -0800
-
-    Replace fmt.Printf() with printMsg(), to use proper ioStreams.
-
-commit 3ab6bb1b83487d0ca2c26494f81ee57e69475ba9
-Author: Dinesh Israni
-Date: Fri Nov 9 18:52:03 2018 -0800
-
-    Add unittest for version subcommand
-
-commit 1fa341e1841ed7bbb7ba5a2a7e9285635c7221ea
-Author: Dinesh Israni
-Date: Fri Nov 9 18:41:20 2018 -0800
-
-    Add version to storkctl
-
-    - Use generated version in stork
-    - Use version-gitSHA
-
-commit ca18866a97ca2f6c168657dc8c36bd489bbc8e09
-Author: Craig Rodrigues
-Date: Mon Nov 12 16:16:11 2018 -0800
-
-    "Atleast" should be "At least"
-
-commit 58d4546cde6bcd724550274d8642b6235673d497
-Author: Dinesh Israni
-Date: Fri Nov 9 18:20:25 2018 -0800
-
-    Process all migrations before returning
-
-    Still wait for all migrations to complete before updating status
-
-commit 14b751a545a2446af2046b271796c5ed36ce887d
-Author: Dinesh Israni
-Date: Fri Nov 9 00:48:10 2018 -0800
-
-    Set Kind in snapshot object before executing rule
-
-    It doesn't seem to be set always when being passed in from the snapshot
-    controller. With that the GetObject() and UpdateObject() APIs can't determine
-    the objects type.
-
-commit d7de0de3c34a3db8ecd0bc69b938898023f1349f
-Author: Dinesh Israni
-Date: Thu Nov 8 18:29:05 2018 -0800
-
-    Make sure namespaces exists before starting migration
-
-    Also don't check for volume migration status if none are being migrated
-
-commit 59cd317e89dfe11c002e05efe8ef395e02ec7876
-Author: Craig Rodrigues
-Date: Mon Oct 15 16:21:11 2018 -0700
-
-    Add test for snapshots
-
-    - Add test functions for mock testing kubernetes API server
-    These functions are taken from 'get_test.go' in the kubernetes
-    repository.
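The "Set Kind in snapshot object before executing rule" entry above notes that objects handed over by the snapshot controller do not always carry TypeMeta, which breaks generic helpers that need to determine the object's type. A minimal sketch of that defensive fix; the type definition and group/version string are assumed for illustration:

package rule

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Snapshot is a stand-in for the external-storage VolumeSnapshot type
// used by the rule executor; fields are trimmed for illustration.
type Snapshot struct {
	metav1.TypeMeta
	metav1.ObjectMeta
}

// ensureTypeMeta fills in Kind and APIVersion when the snapshot
// controller hands over an object with an empty TypeMeta, so generic
// GetObject()/UpdateObject() helpers can still resolve the type.
func ensureTypeMeta(snap *Snapshot) {
	if snap.Kind == "" {
		snap.Kind = "VolumeSnapshot"
		// Group/version assumed for illustration.
		snap.APIVersion = "volumesnapshot.external-storage.k8s.io/v1"
	}
}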
-
-commit ad41e1ecc67d27a3419c738afad1864fc1034653
-Author: Craig Rodrigues
-Date: Fri Nov 9 11:27:05 2018 -0800
-
-    Update dependencies
-
-commit 01d1b4770f98b9c73de21d720c3cbd7e732ddbb9
-Author: Dinesh Israni
-Date: Thu Nov 8 15:44:33 2018 -0800
-
-    Push storkctl only from master
-
-commit c6b17627f07aa00d431fd916746258275377f9b3
-Author: Dinesh Israni
-Date: Thu Nov 8 15:05:36 2018 -0800
-
-    Update dependencies
-
-commit 597bc3e30789118afec6b545a25470538af4683d
-Author: Dinesh Israni
-Date: Thu Nov 8 14:57:13 2018 -0800
-
-    Use kubectl helper to deal with errors
-
-    Error behavior can be overwritten for tests
-
-commit f84eb8ca1a6c4923d10a1b55da6fc5c6cdfd6a4f
-Author: Dinesh Israni
-Date: Thu Nov 8 14:35:52 2018 -0800
-
-    Add auth providers to storkctl
-
-commit 990a3a54b3e30b9dc32668971c5c87ecb117d547
-Author: Dinesh Israni
-Date: Thu Nov 8 13:55:50 2018 -0800
-
-    Vendor update for sched-ops
-
-commit 7fcd63af6cce2ff0b034e05297a38319ee743ca5
-Author: Dinesh Israni
-Date: Wed Nov 7 18:05:32 2018 -0800
-
-    Read files when generating cluster pair and populate inline
-
-commit b3e1f454d84f95ea2c18bebb1c529ed3dc309773
-Author: Dinesh Israni
-Date: Wed Nov 7 18:28:09 2018 -0800
-
-    Add empty PersistentVolumeRef to VolumeSnapshotData
-
-commit 81b85e586b4a84d0636be585d65d1635515b198e
-Author: Craig Rodrigues
-Date: Mon Nov 5 18:06:12 2018 -0800
-
-    Add SetOutputFormat() to Factory interface
-
-    This is needed to facilitate tests.
-
-commit 64894c894119f5997829d7ec0faa7f67862e3314
-Author: Dinesh Israni
-Date: Tue Nov 6 20:29:54 2018 -0800
-
-    Replace gcloud binary path in generated clusterpair
-
-commit a4887eb30babfa5166421bc4ec1851cc5d2f8523
-Author: Dinesh Israni
-Date: Tue Nov 6 20:15:24 2018 -0800
-
-    Only add info for current context when generating cluster pair
-
-commit 80057754a36499c88bd1929b6ba422c85c97cbbb
-Author: Dinesh Israni
-Date: Tue Nov 6 20:01:20 2018 -0800
-
-    Add gcloud to the stork container
-
-commit 1bf3f0c005e3293d11eefb075f0074c160ae2d7f
-Author: Dinesh Israni
-Date: Tue Nov 6 18:53:18 2018 -0800
-
-    Update permissions for get and list of all resources
-
-commit decc97bf178062c75a0e0a7cc34caad15e0dc2ac
-Author: Craig Rodrigues
-Date: Mon Nov 5 13:37:47 2018 -0800
-
-    Update golang version in travis to 1.11.2
-
-commit e201a7525d7e0f670d3b0eac8d8bf79dd15b61da
-Author: Dinesh Israni
-Date: Sun Nov 4 00:00:28 2018 -0700
-
-    Add check to resolve hostname when matching nodes for k8s on DC/OS
-
-commit 9d67b9a286a312596e1d2a486d6398cddd247aed
-Author: Dinesh Israni
-Date: Sat Nov 3 23:37:48 2018 -0700
-
-    Disable leader election for snapshot controller
-
-    Leader election already happens in stork
-
-commit 55351568b6826b68cb6aa9e1fc80d4cbd123cdce
-Author: Dinesh Israni
-Date: Fri Nov 2 23:42:22 2018 -0700
-
-    Fix extender unittest
-
-    Also exit with error in Makefile if unittest fails
-
-commit 34f7be8cdfadfc64216db6484401151f6826b8bc
-Author: Dinesh Israni
-Date: Fri Nov 2 22:27:48 2018 -0700
-
-    Don't use namespace for cluster resources with dynamic client
-
-commit 34ae11c2ed127ea6de9ea9cc9fea351d9c5c71e1
-Author: Dinesh Israni
-Date: Fri Nov 2 22:26:41 2018 -0700
-
-    Update branch to use for openstorage
-
-commit 021861735cb69f256c4cf03891636505faf84eab
-Author: Dinesh Israni
-Date: Fri Nov 2 16:53:36 2018 -0700
-
-    Removed unused variables
-
-commit ed27912790cacad21df043642e8d963f32c0ab04
-Author: Dinesh Israni
-Date: Tue Oct 30 16:50:00 2018 -0700
-
-    Update generated code for CRDs
-
-commit ac00582e4ec2d7380ab9cd3a7977965a462868bf
-Author: Dinesh Israni
-Date: Tue Oct 30 15:35:23 2018 -0700
-
-    Switch from govendor to dep
-
-commit 4332391a181a54410d4e0b85b7625eb962be7111
-Author: Dinesh Israni
-Date: Fri Oct 12 15:31:17 2018 -0700
-
-    Govendor update
-
-    Update k8s version to 1.11.0
-
-commit f1c7d85e684f9856546621144014e9768e8e8fc4
-Author: Dinesh Israni
-Date: Tue Oct 9 16:55:08 2018 -0700
-
-    Refactor rule package
-
-    Removed from snapshot package and added a separate package so that it can be
-    used by different modules
-
-    Also fixed annotation to use the correct conventions. The old annotations can
-    still be used but they will be deprecated.
-
-commit 00b78f58e0dce0ad2552b5e47af0008bbfcbca37
-Author: Dinesh Israni
-Date: Wed Oct 17 17:32:28 2018 -0700
-
-    Update storkctl to pass in streams
-
-    Will be used by unit tests
-
-commit 46b6aeda499628e08ce6a2d9f3287e2041cb4709
-Author: Dinesh Israni
-Date: Thu Nov 1 21:07:35 2018 -0700
-
-    Disable CGO to allow binaries to run on alpine
-
-commit 3ef4d02ee27be1dc46a1ea6a964a27432748f0af
-Author: Dinesh Israni
-Date: Thu Nov 1 12:46:54 2018 -0700
-
-    Add aws authenticator binary to the container
-
-    Also use alpine instead of atomic and remove things added for
-    rhel registry
-
-commit faa6b7a05ccf65e5cf694bf3be2bc063ebeec6e4 (tag: v1.3.0-beta1)
-Author: Dinesh Israni
-Date: Sat Oct 27 00:03:14 2018 -0700
-
-    Update version to 1.3.0-beta
-
-commit d7211031e8c806e6f207fedf917e4b0e342ced85
-Author: Dinesh Israni
-Date: Fri Oct 26 15:08:15 2018 -0700
-
-    Make the storctl binaries publicly accessible
-
-commit 7e43717fe0d9b12f2f6b6fd45df846d47ff83fb6
-Author: Dinesh Israni
-Date: Thu Oct 25 20:20:04 2018 -0700
-
-    Add command to generate clusterpair spec from destination cluster
-
-commit b6a1883c81c0318e46633a81c0403497a88001de
-Author: Dinesh Israni
-Date: Thu Oct 25 17:17:44 2018 -0700
-
-    [Portworx] Add reason for migration failure
-
-commit befc40f2e55eaab85f4fc2a1b076b35f99e34b37
-Author: Dinesh Israni
-Date: Thu Oct 25 17:17:22 2018 -0700
-
-    Govendor update for openstorage
-
-commit 723d782af0c7bc5c3d4ac0db29685ecf26b6e806
-Author: Dinesh Israni
-Date: Thu Oct 25 15:47:13 2018 -0700
-
-    [Portworx] Add namespace to migration task Id
-
-    The pvc name can exists across multiple namespaces
-    Also use task id to match status instead of volumename
-
-commit b88865a129c8fc19e6dbf44d665891b2f8b2cbc4
-Author: Dinesh Israni
-Date: Tue Oct 23 20:21:53 2018 -0700
-
-    [Portworx] Update cloudsnap and cloudmigrate APIs to use taskIDs for idempotency
-
-commit 0a3eff3b4862689dc186cda0e3fc16f514cb93b0
-Author: Dinesh Israni
-Date: Tue Oct 23 20:21:39 2018 -0700
-
-    Govendor update for openstorage
-
-commit ebdd417403d13b1b07c51fa2f9f649474877465a
-Author: Dinesh Israni
-Date: Tue Oct 23 20:41:49 2018 -0700
-
-    Update permissions for migration and cluster pair
-
-    Also remote extra permissions for CRDs and sync permissions in the daemonset
-    spec
-
-    Issue #165
-
-commit 257bf61f348a836d7302bb296e8da1b36ebf8db3
-Author: Dinesh Israni
-Date: Fri Oct 19 20:59:48 2018 -0700
-
-    Add command to create pvc from snapshot
-
-commit 9f2499d3b4fafc4ec1ec3bcc7faf879d01ad3584
-Author: Dinesh Israni
-Date: Fri Oct 19 19:16:33 2018 -0700
-
-    Upload storkctl binaries to s3
-
-commit a6dc36b574cc678857abb01be811e930d5e9d28b
-Author: Dinesh Israni
-Date: Fri Oct 19 18:55:11 2018 -0700
-
-    Add storkctl to container
-
-commit abf1e99a5f902c630bbafdea4059ca1cfe318da2
-Author: Dinesh Israni
-Date: Tue Oct 9 16:02:29 2018 -0700
-
-    Add timeouts for updated APIs
-
-commit 93ad80bc14f0d83a8f1dce890c9cf27289a2a9a2
-Author: Dinesh Israni
-Date: Tue Oct 9 16:01:50 2018 -0700
-
-    Govendor update
-
-commit 20b16503157af232844ab87f8059c29eb9467576
-Author: Dinesh Israni
-Date: Wed Jun 27 14:23:45 2018 -0700
-
-    Add storkctl to manage stork resources
-
-    - Subcommands added are create, get and delete
-    - Supported resources are
-    - volumesnapshots
-    - clusterpair
-    - migration
-    - Supports global parameters for namespace, kubeconfig, context and output
-    format through CmdFactory
-    - Binary is build for linux, darwin and windows
-
-    Issue #80
-
-commit 2a03cba14fb1841cbda76668f1ce4d82be15086e
-Author: Dinesh Israni
-Date: Thu Sep 27 15:43:04 2018 -0700
-
-    Update migration object to be cluster scoped
-
-commit c199b0a30aac6545f7383b30b1d4d790f385b1a6
-Author: Dinesh Israni
-Date: Mon Sep 24 14:21:26 2018 -0700
-
-    Change driver registration log to debug
-
-commit 57de4b753abfddbf933b8f5f8f8e070493bfc9d1
-Author: Dinesh Israni
-Date: Mon Sep 24 13:49:12 2018 -0700
-
-    Update resync parameter for controller for time.Duraion
-
-commit bc319c510b3e4caa24aba77357641ab03c02022a
-Author: Dinesh Israni
-Date: Wed Sep 19 21:00:00 2018 -0700
-
-    Govendor update
-
-commit f988cd4a8544ab317d4344ce2145e7216b52ae03
-Author: Dinesh Israni
-Date: Fri Oct 12 14:44:21 2018 -0700
-
-    Update golang version in travis to 1.10.4
-
-commit f52b861d85a5ed8621eecad05eac5e0a26c3cd4a
-Author: Craig Rodrigues
-Date: Fri Oct 12 14:33:00 2018 -0700
-
-    Add missing argument to logrus.Warnf (#156)
-
-commit 0634241b093712bddd9471ff549a236e0cd0a9ba
-Author: Craig Rodrigues
-Date: Fri Oct 12 14:16:06 2018 -0700
-
-    Fix import path of golint (#157)
-
-commit 2eedbd8d2b412e5c540f5a1e8c960b789c8bfff5
-Author: Dinesh Israni
-Date: Mon Oct 8 17:44:09 2018 -0700
-
-    Add auth providers and default client config loading rules
-
-commit 97d7aacf3a7839c48d172852d518b9e6ffc41bf0
-Author: Dinesh Israni
-Date: Mon Oct 8 17:43:54 2018 -0700
-
-    Govendor update
-
-commit 150bf8f0a540c0e674e9f0928ccf5a2f3aff05cf
-Author: Dinesh Israni
-Date: Mon Oct 8 15:56:34 2018 -0700
-
-    Don't create namespace if it already exists on remote cluster
-
-commit 6f3f1e4641b0338489cad2245550a8f5a85341f9
-Author: Dinesh Israni
-Date: Sat Oct 6 00:13:00 2018 -0700
-
-    Check if PV and PVC are owned by driver when migrating
-
-commit 8ef28ffea28fbe2cf4f8bdd67ddefea7d42aa2f3
-Author: Harsh Desai
-Date: Fri Oct 5 10:10:03 2018 -0700
-
-    Group snapshot should wait till all PVCs are bound (#153)
-
-    Fixes #152
-
-    Signed-off-by: Harsh Desai
-
-commit bb74377883b97e153710cb3825cdcca3b7082a55
-Author: Dinesh Israni
-Date: Thu Sep 27 19:19:09 2018 -0700
-
-    Start snapshot provisioner in background since it blocks
-
-commit 5bd7347a775eaa2fd102fd4b9780ffb707092276
-Author: Dinesh Israni
-Date: Fri Sep 28 12:57:16 2018 -0700
-
-    Fix some issues found from Go Report Card
-
-commit ca466f7d1a8ba2ea1a6b1434519e21923d08faad
-Author: Harsh Desai
-Date: Fri Sep 21 17:14:52 2018 -0700
-
-    Add docs for 3d snaps (#148)
-
-    Fixes #147
-
-    Signed-off-by: Harsh Desai
-
-commit 760b94dad5d121e1d3323098c608cc83053c0c71
-Author: Dinesh Israni
-Date: Mon Sep 10 14:02:05 2018 -0700
-
-    Start migration controller from cmd
-
-    The controller manager needs to be started first followed by all the
-    other controllers
-
-commit 1e1e73201cf03970edf7fa4683fb4db960452ab9
-Author: Dinesh Israni
-Date: Mon Sep 10 13:51:35 2018 -0700
-
-    Add controllers for pairing and migration CRDs
-
-    Pairing:
-    - When a pairing is created an API call is made to the storage driver to create
-    a pair. The k8s config is also verified to make sure we can talk to the remote
-    cluster
-    - When a pairing is deleted an API call is made to the storage driver to delete
-    its pairing too
-
-    Migration:
-    - Takes in a cluster pair name to which migration should be done
-    - Users can choose to migration only volumes from storage driver or volumes +
-    resources (ie PVCs, deployments, statefulsets, etc)
-    - First volumes are migrated from storage driver and then resources are migrated
-    if requested
-    - Stage goes from Initial->Volumes->Application->Final
-    - Pods aren't migrated right now. Need to migrate pods which don't have a
-    replication controller
-    - Service account secrets are not migrated since they belong to a cluster
-    - Config can be set to not scale up applications (ie deployments, statefulsets)
-    on remote cluster. This will add an annotation to store what the replica count
-    was on source cluster
-    - Events are recorded for various success/failure steps.
-
-commit 63d66122d6617f06333e55a58125e7b0945f2b12
-Author: Dinesh Israni
-Date: Mon Sep 10 13:50:27 2018 -0700
-
-    Add pairing and migration APIs to volume driver interface
-
-    Add implementation in Portworx driver
-    Return NotImplemented for mock driver
-
-commit 18efa0bd4ffb6805893bccadb74f5ab9e7de74ac
-Author: Dinesh Israni
-Date: Mon Sep 10 13:48:25 2018 -0700
-
-    Remove integration-test from default target
-
-commit 83f11dabf44de61d01723add05607e8adba843bb
-Author: Dinesh Israni
-Date: Mon Sep 10 13:46:42 2018 -0700
-
-    Add controller package that can be used to watch for changes
-
-    Modules can register for updates for particular types
-
-commit feb7f1822af28417c8219b12cf239c07521a5c41
-Author: Dinesh Israni
-Date: Thu Jul 26 14:47:48 2018 -0700
-
-    Govendor update
-
-commit ef54663320aafd3172491d06ac70be615ce1d688
-Author: Dinesh Israni
-Date: Mon Sep 10 13:45:51 2018 -0700
-
-    Add CRDs for pairing and migration
-
-commit dc214de6633dac7f1145e41158c9596d17d91378
-Author: Dinesh Israni
-Date: Thu Jul 26 14:47:48 2018 -0700
-
-    Govendor update
-
-commit 5e9308aaa5e98edda465d57515575c5a91be5f39
-Author: Dinesh Israni
-Date: Thu Sep 13 19:49:36 2018 -0700
-
-    Update health monitor to also use better node matching logic
-
-    Issue #144
-
-commit cd98a94bde5823d8759c29a06245ae5f0e17cad2
-Author: Dinesh Israni
-Date: Thu Sep 13 18:47:56 2018 -0700
-
-    Update test verification to also look at node IP to match nodes
-
-    Issue #142
-
-commit 11e06757f1e53909e731790289ecc520830dec37
-Author: Dinesh Israni
-Date: Thu Sep 13 15:45:51 2018 -0700
-
-    When matching nodes check for internalIP incase hostnames are different
-
-    This is for environments like K8s on DC/OS where the kubelet is running
-    in a containerized environment and has a different hostname than the physical
-    node
-
-    Issue #142
-
-commit fd7785c6bff6dc4fa04ad6fd68cd69d7fb259340
-Author: Dinesh Israni
-Date: Thu Sep 6 17:44:50 2018 -0700
-
-    Add namespace and other info for logs
-
-commit d950d641d9c9ceda6d9e245100b076cd8ef7633a
-Author: Harsh Desai
-Date: Fri Aug 24 13:50:35 2018 -0700
-
-    Rename storkrule to rule (#138)
-
-    * Rename storkrule to rule
-
-    Closes #128
-
-    Signed-off-by: Harsh Desai
-
-    * update vendor
-
-    Signed-off-by: Harsh Desai
-
-    * openstorage => libopenstorage
-
-    Signed-off-by: Harsh Desai
-
-commit 130c674ed2ee159bf86e770605d1b6c1f5bc6f64
-Author: Dinesh Israni
-Date: Wed Aug 15 13:11:51 2018 -0700
-
-    Update integration test script to pass in test image
-
-commit 56ffaf4e559fcdd9f2e7ca25abb00fd94528478f
-Author: Dinesh Israni
-Date: Mon Aug 13 19:34:35 2018 -0700
-
-    Portworx: Update snapshot API call to not retry internally
-
-    Also update cloudsnap status type from openstorage
-
-commit a04b42072a755ef4b35b2049c72af9e81891378c
-Author: Dinesh Israni
-Date: Mon Aug 13 19:32:50 2018 -0700
-
-    Govendor update
-
-commit ed62abf34c351bfec7c8932c1ea077dc800b642f
-Author: Dinesh Israni
-Date: Tue Aug 14 14:51:48 2018 -0700
-
-    Add option to integration test script to pick docker image name
-
-commit 2d35a36009d76af02a6ced8e7e61280f3219e127
-Author: Harsh Desai
-Date: Thu Aug 9 14:40:11 2018 -0700
-
-    Background command fixes (#133)
-
-    * Background command fixes
-
-    1) Don't send the background termination channel if there are no background commands
-    2) Handle situations where there could be more than one background actions and the first one has a single pod
-
-    Fixes #131
-
-    Signed-off-by: Harsh Desai
-
-    * Update mysql 3d snap test to handle one more background command action which runs on single pod
-
-    Signed-off-by: Harsh Desai
-
-    * if any action fails, terminate all background jobs
-
-    Signed-off-by: Harsh Desai
-
-commit fec363db2ab62b8ba23156752c2033d92fa32d06
-Author: Harsh Desai
-Date: Mon Aug 6 19:59:56 2018 -0700
-
-    overide default cmdexecutor for tests (#130)
-
-    * overide default cmdexecutor for tests
-
-    Signed-off-by: Harsh Desai
-
-    * rename override annotation
-
-    Signed-off-by: Harsh Desai
-
-commit bcd0e9af2f8028404908192b9bfe1a4b0b6fa480 (tag: cmd-executor-v0.1)
-Author: Harsh Desai
-Date: Mon Aug 6 18:59:32 2018 -0700
-
-    Add support for 3d snapshots (#118)
-
-    * vendoring changes
-
-    Signed-off-by: Harsh Desai
-
-    * Add support for 3d snapshots
-
-    Signed-off-by: Harsh Desai
-
-    * update sched-ops to get crd api
-
-    Signed-off-by: Harsh Desai
-
-    * addressing review comments
-
-    Signed-off-by: Harsh Desai
-
-    * Update sched ops vendor
-
-    Signed-off-by: Harsh Desai
-
-    * Address review comments
-
-    Signed-off-by: Harsh Desai
-
-    * Just send pvc list to rule api
-
-    Signed-off-by: Harsh Desai
-
-    * Review comments
-
-    Signed-off-by: Harsh Desai
-
-    * validate snapshot rules before starting any snap operations
-
-    Signed-off-by: Harsh Desai
-
-commit 9b460bdf7ae3f30948347aeb130ba36c7dcc2f5e
-Author: Dinesh Israni
-Date: Fri Jul 27 15:56:30 2018 -0700
-
-    Fix codegen to use correct vendor directory
-
-    Also update generated code
-
-commit 92b40731b33e49a7ab423e635db75ff7b172683d
-Author: Dinesh Israni
-Date: Fri Jul 27 15:56:55 2018 -0700
-
-    Govendor add k8s code-gen
-
-commit 454eadc891b46e01ce6caafcf707c37728320a0e
-Author: Harsh Desai
-Date: Fri Jul 20 14:48:19 2018 -0700
-
-    Update sched-ops to fix snapshot validation
-
-    - Updated sched-ops will now continue even if the first validation fails
-
-    Signed-off-by: Harsh Desai
-
-commit c675875fc70ca8271958d60c76fc64fb4f2028fb
-Author: Harsh Desai
-Date: Thu Jul 19 11:40:52 2018 -0700
-
-    fix group name
-
-    Signed-off-by: Harsh Desai
-
-commit 89616e05ca121f6a24de845909fa6b7e5e114603
-Author: Harsh Desai
-Date: Wed Jul 18 19:02:14 2018 -0700
-
-    Update group name
-
-    Signed-off-by: Harsh Desai
-
-commit 9d12451f451ee67fce6c74cca8d53bbec04453ec
-Author: Harsh Desai
-Date: Wed Jul 18 18:54:28 2018 -0700
-
-    workaround to update vendor manually until torpedo and sched-ops are updated
-
-    Signed-off-by: Harsh Desai
-
-commit 7b48bbd62fc92dd302a2ca93c47742d8696baf37
-Author: Harsh Desai
-Date: Wed Jul 18 18:42:43 2018 -0700
-
-    rename stork.com
-
-    Signed-off-by: Harsh Desai
-
-commit c7d67aaa77e15ff61c364498f4564bb3111217fa
-Author: Dinesh Israni
-Date: Tue Jul 10 13:10:29 2018 -0700
-
-    Update a test to hit path where node name is different from driver node ID
-
-commit 33e61579416980c5f25ef71218c46ba3504bc6bb
-Author: Harsh Desai
-Date: Wed Jul 18 13:17:42 2018 -0700
-
-    fix integration tests (#122)
-
-    The tests now explictly use the get snapshots call and also verify the volumesnapshot data
-
-    Signed-off-by: Harsh Desai
-
-commit 8358aea2b66f8d437aee409462da4b403ebd3fa4
-Author: Harsh Desai
-Date: Tue Jul 17 18:09:46 2018 -0700
-
-    Check snapshot data before restoring from it (#121)
-
-    * Update sched-ops vendor to get validate api for snapshot data
-
-    Signed-off-by: Harsh Desai
-
-    * Check snapshot data before restoring from it
-    Fixes #120
-
-    Signed-off-by: Harsh Desai
-
-commit 9f16cc0f713dd456d7af585a5d0fa712a6875b02
-Author: Dinesh Israni
-Date: Mon Jul 16 14:00:10 2018 -0700
-
-    Print node info on failure when checking scheduled node
-
-commit ab453b9161a1446526d8563d00fccfa55c418bf1
-Author: Dinesh Israni
-Date: Tue Jul 10 14:18:18 2018 -0700
-
-    Add unittests for invalid requests to extender
-
-commit e20ae5a6631a5e3b9629c8d72ff0782bae2d2cbb
-Author: Harsh Desai
-Date: Mon Jul 9 22:47:22 2018 -0700
-
-    Add CRD for stork rules (#115)
-
-    * Add CRD for stork rules
-
-    Signed-off-by: Harsh Desai
-
-    * Add script to generate CRD and the generated files
-
-    Signed-off-by: Harsh Desai
-
-    * build changes
-
-    Signed-off-by: Harsh Desai
-
-    * changes to crd review comments
-
-    Signed-off-by: Harsh Desai
-
-    * Remove GOVET_PKGS from makefile and just use PKGS
-
-    Signed-off-by: Harsh Desai
-
-    * fix tabs in makefile and remove unused comments in update-codegen.sh
-
-    Signed-off-by: Harsh Desai
-
-commit 051ed128d229cb6e8634d49fe1b884b678f03ff8
-Author: Dinesh Israni
-Date: Fri Jul 6 17:32:21 2018 -0700
-
-    Extender: Also check node name to match driver nodes
-
-    Issue #112
-
-commit da0cf1214fcfd24715ce9fdd56ba2ea420639231
-Author: Dinesh Israni
-Date: Thu Jul 5 18:40:26 2018 -0700
-
-    Update travis to push image for all branches (#110)
-
-commit 4e2201c456ca9ebc6f63d739afa28327f47604ec
-Author: Dinesh Israni
-Date: Tue Jul 3 14:24:33 2018 -0700
-
-    Update specs in integration test instead of deleting and recreating
-
-commit 2e8188d5f413dd93b6bfba83ba992c4c6325f115
-Author: Harsh Desai
-Date: Mon Jul 2 16:05:36 2018 -0700
-
-    Add an executor CLI for running async commands in pods (#104)
-
-    * Add an executor CLI for running async commands in pods
-
-    Signed-off-by: Harsh Desai
-
-    * review changes
-
-    Signed-off-by: Harsh Desai
-
-    * address review comments
-
-    Signed-off-by: Harsh Desai
-
-    * fix gosimple
-
-    Signed-off-by: Harsh Desai
-
-commit af7f47e692c42246b3c544938dea707a06a4dba9
-Author: Dinesh Israni
-Date: Thu Jun 28 20:35:50 2018 -0700
-
-    Check node cache with ID and hostname
-
-commit 05dbc099b9d0b2c192d210ddeedd4fc71ed618f0
-Author: Dinesh Israni
-Date: Wed Jun 27 15:00:29 2018 -0700
-
-    Add gosimple checker
-
-    Also fix issues found by gosimple
-
-commit 47fdb22e42eea1e14e8f28c72dbfa57990a03cde
-Author: Harsh Desai
-Date: Wed Jun 13 09:08:30 2018 -0700
-
-    don't enforce pv reference for describe of local snapshots
-
-    Fixes #106
-
-    Signed-off-by: Harsh Desai
-
-commit 3a07e70fc5f4c0041a1da96d9fb0685b23216641
-Author: Dinesh Israni
-Date: Sun Jun 10 19:41:16 2018 -0700
-
-    Add snapshot scale test
-
-commit bd5057d805cf0b503f9d0f9a909fd53c67ae7cfe (tag: v1.1.1)
-Author: Dinesh Israni
-Date: Thu Jun 7 16:55:09 2018 -0700
-
-    Bump version to 1.1.1
-
-commit 45863a30e1544e9c86c9199be6d1647395b564c1
-Author: Dinesh Israni
-Date: Thu Jun 7 16:39:00 2018 -0700
-
-    Add support to restore snapshots to different namespaces
-
-    - When creating snapshots users need to provide comma seperated regexes
-    with "stork/snapshot-restore-namespaces" annotaion to specify which
-    namespaces the snapshot can be restored to
-    - When creating PVC from snapshots, if a snapshot exists in another
-    namespace, the snapshot namespace should be specified with
-    "stork/snapshot-source-namespace" annotation
-
-    Issue #71
-
-commit 321b525e65a433abbc6de8b63d864f73cdbf8ca4
-Author: Dinesh Israni
-Date: Thu Jun 7 03:19:43 2018 -0700
-
-    Add a cache for node info
-
-    Querying the API server for each node takes too long and times
-    out the requests in a large cluster
-
-    Issue #99
-
-commit 531a1a84f54d29d5b4b5685a32edaaa74286ac26 (tag: v1.1)
-Author: Harsh Desai
-Date: Thu May 31 17:01:55 2018 -0700
-
-    Update vendor to pull in sched-ops fix for validating snaps (#97)
-
-    Signed-off-by: Harsh Desai
-
-commit 21ecd68f4c40cbf3eb9a2e3784996a227b5de23f
-Author: Harsh Desai
-Date: Wed May 30 08:37:32 2018 -0700
-
-    retry inspect volumes for group snapshots (#95)
-
-    Signed-off-by: Harsh Desai
-
-commit 744bf489a533d33bd2ab5118d694816039fd33a2
-Author: Harsh Desai
-Date: Wed May 23 23:27:23 2018 -0700
-
-    Ensure version check for group and cloud snapshot (#94)
-
-    * Ensure version check for group and cloud snapshot
-    Fixes #82
-
-    Signed-off-by: Harsh Desai
-
-    * retry on cluster enumerate
-
-    Signed-off-by: Harsh Desai
-
-commit 6bf00e23b10cb767376cf3db808e2fa33c7d39df
-Author: Harsh Desai
-Date: Wed May 23 13:34:49 2018 -0700
-
-    update group snapshot annotation keys (#93)
-
-    * update group snapshot annotation keys
-
-    Signed-off-by: Harsh Desai
-
-    * remove unnecessary checks for labels
-
-    Signed-off-by: Harsh Desai
-
-commit 818bbdf3e7ed2ff9ed85f642908a9338df95523f
-Author: Dinesh Israni
-Date: Mon May 21 18:04:16 2018 -0700
-
-    Make the health-monitor interval configurable
-
-    Default is 120 seconds and minimum is 30 seconds
-
-    Also update version for upcoming release
-
-commit 87cdd97964d3cd618ce3bbd589e35d0c12f7dfaa
-Author: Dinesh Israni
-Date: Thu May 17 16:54:12 2018 -0700
-
-    If no driver nodes found in filter request return error
-
-    Fixes #89
-
-commit 99afb2aa72c097effaadb11fcd3c4edb14a20641
-Author: Harsh Desai
-Date: Thu May 17 14:30:21 2018 -0700
-
-    Update vendor to allow error status conditions in failed volumesnapshots (#88)
-
-    Signed-off-by: Harsh Desai
-
-commit 50cc4666702d7cf2bd42e2f0fa8f2d208cdf81f6
-Author: Harsh Desai
-Date: Wed May 16 13:59:10 2018 -0700
-
-    For group snapshots, include UUID of parent volumesnapshot in child volumesnapshots (#87)
-
-    Fixes #86
-
-    Signed-off-by: Harsh Desai
-
-commit fa7d602b8c0ce1c8630706ee9dc68dedb8609c97
-Author: Dinesh Israni
-Date: Mon May 14 15:37:06 2018 -0700
-
-    Update vendor for torpedo
-
-commit 1be992282d6da65ac594a5698b9e5a025091e7d4
-Author: Dinesh Israni
-Date: Thu Apr 26 15:16:40 2018 -0700
-
-    Update docs for initializer
-
-    Issue #54
-
-commit ff8c0a94ed552fda85feb34212dfc7285816abb8
-Author: Dinesh Israni
-Date: Thu Apr 26 14:54:09 2018 -0700
-
-    Add statefulset to integration test to test initializer
-
-    Issue #54
-
-commit 22a4ae0b978560ef6e91c1ae5fb787c96bc1045f
-Author: Dinesh Israni
-Date: Thu Mar 29 13:48:11 2018 -0700
-
-    Add initializer spec
-
-    Also update deployment spec with comment to enable initializer
-
-    Issue #54
-
-commit 7ea4eeb0ff2351bcb075a139bb1475e5d82861cc
-Author: Dinesh Israni
-Date: Fri Mar 2 22:32:04 2018 -0800
-
-    Add initializer to update scheduler name
-
-    Scheduler name is updated only if pod is using a volume by the
-    specified driver
-
-    - Also added cmd args to enable/disable features
-    - Need a different initializer for v1, v1beta1 and v1beta2
-    - Updated test script to enable test with initializer
-    - Updated APIs for k8s 1.10
-
-    Issue #54
-    Issue #9
-
-commit 609a15b3cb4f094e15515a53c72ba5e14c771528
-Author: Dinesh Israni
-Date: Tue May 8 17:38:38 2018 -0700
-
-    Govendor update k8s libraries to 1.10
-
-commit 72da8b1bb38cf2fbb10d8feea16bd6803ccb600d
-Author: Harsh Desai
-Date: Sun May 13 14:44:59 2018 -0700
-
-    Fix cloudsnap integration to check only data volumes in use (#84)
-
-    * Fix cloudsnap integration to check only data volumes in use
-
-    Signed-off-by: Harsh Desai
-
-    * extract common func to parse data volumes
-
-    Signed-off-by: Harsh Desai
-
-commit 6a3595d944acdc233713e4d2e25386ae97a4d090
-Author: Harsh Desai
-Date: Thu May 10 17:01:33 2018 -0700
-
-    verify scheduled node only for mysql-data-1 (#83)
-
-    Signed-off-by: Harsh Desai
-
-commit b355c25df712f4b03bacd0936aea6a9eb2eecf8e
-Author: Harsh Desai
-Date: Thu May 10 13:59:52 2018 -0700
-
-    Add support for cloud and group snapshots (#78)
-
-    * vendoring changes
-
-    Signed-off-by: Harsh Desai
-
-    * Add supports for cloud and group snapshots
-    Fixes #61
-    Fixes #55
-
-    Signed-off-by: Harsh Desai
-
-    * fix travis build
-
-    Signed-off-by: Harsh Desai
-
-    * Address review comments. Key changes -
-    1) Wait indefinitely for cloudsnap completion
-    2) Revert group snaps on partial completions
-    3) cloudsnap restore support
-    4) add cloudsnap test
-
-    Signed-off-by: Harsh Desai
-
-    * Review comments
-
-    Signed-off-by: Harsh Desai
-
-    * vendor update for cloudsnap operation type
-
-    Signed-off-by: Harsh Desai
-
-    * review comments
-
-    Signed-off-by: Harsh Desai
-
-    * Add missing secrets vendor
-
-    Signed-off-by: Harsh Desai
-
-commit db0f19df337c3c53ed4b76808f34c00f39c2ed53
-Author: Dinesh Israni
-Date: Thu May 3 12:12:43 2018 -0700
-
-    Use correct volume name for snapshot in torpedo
-
-commit e77632d4719cc9cc39bc528f88be37165bdd0f92
-Author: Dinesh Israni
-Date: Wed May 2 16:48:46 2018 -0700
-
-    Govendor update for torpedo and sched-ops
-
-commit fca8c002c358645ccecebf10ee528078da8525ba
-Author: Dinesh Israni
-Date: Wed May 2 16:22:32 2018 -0700
-
-    Portworx: Return NotImplemented for FindSnapshot
-
-commit 5367db20ec559509336f48ce583608b5b8cd8d37
-Author: Dinesh Israni
-Date: Tue May 1 13:11:11 2018 -0700
-
-    Update stork specs to remove predicate not present in 1.9.x
-
-commit 779a017c68ee9e79f2fcfe540255f61e669660c8
-Author: Dinesh Israni
-Date: Tue May 1 12:40:18 2018 -0700
-
-    Update SnapshotCreate API for Portworx driver
-
-    Issue #55
-    Issue #61
-
-commit 1e9aa635ba9b1310c8e176f4530aa52fff30edef
-Author: Dinesh Israni
-Date: Tue May 1 12:40:03 2018 -0700
-
-    Govendor update for external-storage
-
-    Issue #55
-    Issue #61
-    Issue #42
-    Issue #43
-    Issue #69
-
-commit 2bbd7d3a63c5a9f0278230c9e4673ecd73bd99d2
-Author: Harsh Desai
-Date: Tue Apr 24 12:37:18 2018 -0700
-
-    Fix docker tag in build instructions (#70)
-
-commit 1ed4f9f6a31faff5901ede6350de2a3d87268d20
-Author: Dinesh Israni
-Date: Fri Apr 20 13:27:03 2018 -0700
-
-    Portworx: Use snapshot UID for snapshot name to make it unique
-
-    Also add snapshot name and namespace as labels when creating the
-    snapshot
-
-    Updated integration test
-
-    Issue #67
-
-commit 39ca53c69d3215121a5d3b8f99cf571e5aea7c65
-Author: Dinesh Israni
-Date: Thu Apr 19 13:22:19 2018 -0700
-
-    Pick clone volume correctly in test
-
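The cross-namespace restore support described in commit 45863a30 above is driven entirely by the two annotations quoted in that message. A sketch of how they would be set; only the annotation keys come from the commit text, while the object names, namespaces, and regexes are invented for illustration:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// On the source VolumeSnapshot: a comma separated list of regexes
// naming the namespaces the snapshot may be restored into.
var snapshotMeta = metav1.ObjectMeta{
	Name:      "mysql-snap",
	Namespace: "prod",
	Annotations: map[string]string{
		"stork/snapshot-restore-namespaces": "staging,dev-.*",
	},
}

// On the PVC created from the snapshot in another namespace: it must
// name the namespace the snapshot actually lives in.
var pvcMeta = metav1.ObjectMeta{
	Name:      "mysql-clone",
	Namespace: "staging",
	Annotations: map[string]string{
		"stork/snapshot-source-namespace": "prod",
	},
}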
-commit a3c786b41a0de2a4cd2482aeb08063c321f95f84
-Author: Dinesh Israni
-Date: Wed Apr 18 15:23:31 2018 -0700
-
-    Update snapshot test to also expect snapshot volume
-
-commit a8a669ca260dd2f8786ff264b52c714ed5f981f5
-Author: Dinesh Israni
-Date: Tue Apr 17 18:57:31 2018 -0700
-
-    Update torpedo API calls for stopping driver
-
-commit 3ccb35122fcf746b895019bc5ca2c106e35179e5
-Author: Dinesh Israni
-Date: Tue Apr 17 18:54:21 2018 -0700
-
-    Govendor update sched-ops, torpedo and dependencies
-
-commit 522688da2405233ff21317985407396af8ddfa4b
-Author: Dinesh Israni
-Date: Tue Apr 17 23:10:16 2018 +0000
-
-    Update ssh username and password from env variables
-
-commit 99b6266e3eee5ebf8c7b5e527ad620cb66b703f6
-Author: Dinesh Israni
-Date: Tue Apr 17 23:09:24 2018 +0000
-
-    Add storageclass permissions for scheduler, required for 1.10
-
-commit 733dcbae0aff13f530890a5373d2a9cbb2e4d23b
-Author: Dinesh Israni
-Date: Fri Feb 16 22:59:25 2018 -0800
-
-    Add support for node locality awareness when priotizing nodes
-
-    Also updated Portworx driver to return rack/zone/region info
-
-    Issue #6
-
-commit 3510c416649cee4426cc8ad9ca07090363fff3c3
-Author: Dinesh Israni
-Date: Fri Feb 16 21:07:25 2018 -0800
-
-    Replace k8sutils with sched-ops package
-
-    Issue #17
-
-commit 247723c76f71240c5130f5f9529463b6bff58bb3
-Author: Dinesh Israni
-Date: Fri Feb 16 19:59:53 2018 -0800
-
-    Govendor update for torpedo and sched-ops
-
-commit 577ca9c050bff35bda2e75466065175fd596128d
-Author: Dinesh Israni
-Date: Fri Apr 6 14:58:36 2018 -0700
-
-    Use correct path to replace stork tag
-
-commit b9a2aafafcab53d9eb4727f662b75fbb771c6414
-Author: Dinesh Israni
-Date: Thu Apr 5 13:37:59 2018 -0700
-
-    Check PVC first for provisioner
-
-    Issue #49
-
-commit 84ba7e8db90f5e4d6998b36f8de469517458d4c0
-Author: Dinesh Israni
-Date: Tue Apr 3 12:55:04 2018 -0700
-
-    Don't check source PV when restoring from snapshot
-
-    The original PV could have been deleted.
-
-    Issue #57
-
-commit 5dee852495a17a46ed422469f1aaa7c0fbc3de98
-Author: Piyush Nimbalkar
-Date: Fri Mar 30 15:13:15 2018 -0700
-
-    Add pvc namespace label to the volume on restoring snapshot
-
-commit 95a05becbadb91a9abfa90203f20a09d35561299
-Author: Piyush Nimbalkar
-Date: Fri Mar 30 15:06:45 2018 -0700
-
-    Add pvc name label to the restored snapshot
-
-commit 473c8bf68c3706dfc9f21cf4480536ecd252b4ee
-Author: Dinesh Israni
-Date: Wed Mar 28 19:10:46 2018 -0700
-
-    Fail if any unit test hits errors
-
-commit 27e4f4f90aa73a413fac3899dd730bd6a6730ad6
-Author: Dinesh Israni
-Date: Sun Mar 4 15:16:36 2018 -0800
-
-    Fix error check in extender prioritize request
-
-commit d39ecfde1f5c56fc6f647efc476a3c96b776a00c
-Author: Dinesh Israni
-Date: Mon Mar 26 20:43:09 2018 -0700
-
-    Portworx driver: Check other sources for provisioner
-
-    Issue #49
-
-commit 54bc88d83d811950d16204c985bc8164627498d9
-Author: Dinesh Israni
-Date: Tue Mar 13 21:19:45 2018 -0700
-
-    Add codecoverage and badges
-
-commit 87d0c66d63b4ef0095a30248e3c8b9ff9e3cbf6e (tag: v1.0.3)
-Author: Dinesh Israni
-Date: Mon Mar 19 18:31:13 2018 -0700
-
-    Bump version to 1.0.3
-
-commit 47380187d3b595875d4a060a82782fec69fca439
-Author: Dinesh Israni
-Date: Mon Mar 19 15:41:02 2018 -0700
-
-    Add some metadata to docker container
-
-commit be2c099b5a38f2290071fbb79d6e8901b65f6bb7
-Author: Dinesh Israni
-Date: Sat Mar 17 23:36:29 2018 -0700
-
-    Portworx driver: Check for source not being nil during inspect
-
-    Issue #45
-
-commit 21dcb685ae5bd3401467a4568fec165fcf7d689e
-Author: Dinesh Israni
-Date: Tue Mar 13 21:41:47 2018 -0700
-
-    Create CODE_OF_CONDUCT.md
-
-commit 3f9ee2d62bda5767692f9e11de7ae3d41ae617c4
-Author: Dinesh Israni
-Date: Fri Mar 2 23:01:38 2018 +0000
-
-    Add instructions to use stork with default scheduler
-
-    Issue #38
-
-commit 3a32d6f81bc42719ed29811af06e0346ed750cb7
-Author: Dinesh Israni
-Date: Thu Mar 1 16:56:05 2018 -0800
-
-    Update spec to point to v1.0.2
-
-commit b535e8bbfe99ce34ae564c2cad493689435f9c05 (tag: v1.0.2)
-Author: Dinesh Israni
-Date: Thu Mar 1 16:29:58 2018 -0800
-
-    Bump version to 1.0.2
-
-commit 749c9beb5a63b208e3c35f1f46f9f5d79ce5d2ab
-Author: Dinesh Israni
-Date: Wed Feb 28 13:37:18 2018 -0800
-
-    Update test script to use master image tag
-
-commit 7206fe2657c9eba88edf2e61870b7b120e7f7d53
-Author: Dinesh Israni
-Date: Fri Feb 23 18:28:01 2018 -0800
-
-    Also check for short hostnames in filter request
-
-    Issue #30
-
-commit 3818bbdd2f19d0472049642ac5b798d40f70a303
-Author: Dinesh Israni
-Date: Fri Feb 23 15:13:10 2018 -0800
-
-    Govendor add new dependencies
-
-commit c60ddf5412e0c7e3d33e8428fd8206a750bfc292
-Author: Dinesh Israni
-Date: Fri Feb 23 14:52:51 2018 -0800
-
-    Update sync cache for snapshot controller
-
-    Issue #29
-
-commit 9bf8b5a92886911252c6209714c5d0a00be60895
-Author: Dinesh Israni
-Date: Fri Feb 23 15:56:34 2018 -0800
-
-    Check for short hostnames in extender by checking prefix
-
-    Issue #30
-
-commit 6250ee576b8948facc827287b48e9777c5a57da4
-Author: Dinesh Israni
-Date: Fri Feb 16 18:17:53 2018 -0800
-
-    Move the container to be based on atmoic
-
-    Issue #27
-
-commit c09173e8fe495e3dff9157c7234220e56c42c4b7
-Author: Dinesh Israni
-Date: Thu Feb 15 13:22:01 2018 -0800
-
-    Update README.md
-
-    Fix formatting for snapshot spec example
-
-commit 80f737f2c9ae6e1795966afff3adf155fb8501e9
-Author: Dinesh Israni
-Date: Mon Feb 12 12:14:30 2018 -0800
-
-    Add note about predicates and priorities in spec
-
-commit 0fce6aec6759642f76b1a0938336d6a6b75dd115
-Author: Dinesh Israni
-Date: Mon Feb 12 11:38:56 2018 -0800
-
-    Add configmap namespace for scheduler deployment
-
-commit 44aab7c9a6928f565d11f6c97d93cdd1bd4ab099 (tag: v1.0.1)
-Author: Dinesh Israni
-Date: Mon Feb 5 14:44:13 2018 -0800
-
-    Update version to 1.0.1
-
-commit 89b6bb1effb25fd80f11b651792bdfc4ca30835a
-Author: Dinesh Israni
-Date: Thu Feb 1 13:23:57 2018 -0800
-
-    Convert hostname to lower case to match kubernetes hostnames
-
-    Issue #24
-
-commit f49d2a57d24c630efd340d6289b76836e8d73143
-Author: jose nazario
-Date: Wed Jan 31 15:18:36 2018 -0500
-
-    spelling fixes, no content changes (#23)
-
-commit deebae19e68fa807a01b8d5c3eba81d722c78223 (tag: v1.0, tag: V1.0)
-Author: Dinesh Israni
-Date: Thu Jan 18 12:52:21 2018 -0800
-
-    Update specs to remove unnecessary fields
-
-commit 8a11127e781701ae0d3950c6c9ddcb504c860a85
-Author: Dinesh Israni
-Date: Tue Jan 9 14:16:58 2018 -0800
-
-    Update README.md
-
-commit cc3ecd9e25c3d43a96562a1c028f2f3fb0429cf3
-Author: Dinesh Israni
-Date: Mon Jan 15 02:47:57 2018 +0400
-
-    Update stork spec to use v1.0
-
-commit 343e2816f902947ce993eec80fc1f5dc71261429
-Author: Dinesh Israni
-Date: Mon Jan 15 02:39:50 2018 +0400
-
-    Update version to 1.0
-
-commit 7a1d573b6113db1874e52d7c9c25fd7aaa6ec4ee (tag: v0.3.1)
-Author: Gou Rao
-Date: Wed Jan 10 17:36:19 2018 -0800
-
-    Update README.md
-
-    fix typo
-
-commit e791288f4f721e58bdaa9d43375b9f363eaa570b
-Author: Dinesh Israni
-Date: Tue Jan 9 19:07:15 2018 -0800
-
-    Wait for integration test to come out of Running state at the end
-
-commit 7b38516675a0cec2ace60c2fe0cfe2733086b008
-Author: Dinesh Israni
-Date: Mon Jan 8 21:10:41 2018 -0800
-
-    Check for correct status for integration test
-
-commit 4a5fe058f0f9e310a47c65e1f7b3e6194bbbb697
-Author: Dinesh Israni
-Date: Mon Jan 8 20:31:49 2018 -0800
-
-    Fix for using hostname instead of driver node ID
-
-    The node IDs returned in volume info might not match the hostname
-
-commit 9da0fc8bd4d5cfd84d4c27fae4dd7773b842dd6a
-Author: Dinesh Israni
-Date: Mon Jan 8 20:28:53 2018 -0800
-
-    Govendor update torpedo drivers
-
-commit 031b4c7f59faabbba6e4a17a772f320ddad8bde8
-Author: Dinesh Israni
-Date: Mon Jan 8 15:11:26 2018 -0800
-
-    Update stork spec to use v0.3.1
-
-commit 9d0dd36ae1557e6d66c716b99f1df12d52a68a58
-Author: Dinesh Israni
-Date: Mon Jan 8 15:09:01 2018 -0800
-
-    Make test-deploy.sh executable
-
-commit 05c6ddfef37f14395dc76e636245bbd618844a43
-Author: Dinesh Israni
-Date: Mon Jan 8 14:54:11 2018 -0800
-
-    Update version to 0.3.1
-
-commit f3b978d3c6c036805d1f91ac7437407df5bed418
-Author: Dinesh Israni
-Date: Mon Jan 8 13:39:35 2018 -0800
-
-    Add script to deply and run stork integration tests
-
-commit 2296144226ae8b58ce8a70d92d186f4f7ca881c6
-Author: Dinesh Israni
-Date: Mon Jan 8 10:55:44 2018 -0800
-
-    Remove stork-snapshot storageclass from integration test spec
-
-commit 458a84f8a006a70d8863fd1798a4b4cefcfbdfdb
-Author: Dinesh Israni
-Date: Mon Jan 8 10:53:06 2018 -0800
-
-    Add stork-snapshot storageclass to spec
-
-commit 668470cf5d79cc9c5a8a3ae0e8baf4e699d0724e
-Author: hr1sh1kesh
-Date: Wed Jan 3 16:06:23 2018 +0530
-
-    Fix RBAC permissions for stork-account on ConfigMaps
-
-commit fec13849080a9a3875f92ae2c5de5400414b30ae
-Author: Dinesh Israni
-Date: Thu Dec 28 13:32:30 2017 +0400
-
-    Add specific permissions for stork and the scheduler
-
-commit f30d98f39219176e28b70cbcf781704a7c3b3382
-Author: Dinesh Israni
-Date: Thu Dec 28 13:22:47 2017 +0400
-
-    Start extender on all replicas
-
-commit d82e6c377ecd08112c4ef26232d8da9c6ca2610d
-Author: Dinesh Israni
-Date: Wed Dec 20 06:17:10 2017 +0100
-
-    Update stork-scheduler.yaml
-
-commit 53f81d8487b86f4228e5717b182aefe1ac888e06
-Author: Dinesh Israni
-Date: Tue Dec 19 21:05:45 2017 -0800
-
-    Update stork-scheduler.yaml
-
-    Update spec to not run more than one pod per node
-
-commit 7fd4ae356261d4e61957a4cd213c81e3023ee06c
-Author: Dinesh Israni
-Date: Wed Dec 20 05:59:14 2017 +0100
-
-    Update stork-deployment.yaml
-
-    Update stork spec to not run more than one pod per node
-
-commit f3cafb73a438a772adacd99a282a1425dd61298e
-Author: Dinesh Israni
-Date: Fri Dec 15 18:43:18 2017 -0800
-
-    Update stork to 0.3 in spec and cmd
-
-commit cda37a5847f47c04f7d765165f7b1eb43d6b947d
-Author: Dinesh Israni
-Date: Fri Dec 15 18:42:25 2017 -0800
-
-    Update README.md
-
-commit d65bdafa61bce7e2008fa88aeae0589f4c33f73a
-Author: Dinesh Israni
-Date: Fri Dec 15 18:40:11 2017 -0800
-
-    Update stork deploment spec to run 3 replicas
-
-    Issue #10
-
-commit 5267f64ded2d2622ea6b9f230b0d8f29e0a28b65
-Author: Dinesh Israni
-Date: Fri Dec 15 18:31:36 2017 -0800
-
-    Wait 1 minute before deleting pod in extender test
-
-commit 9cab0ebc3e66b4c7216e10815e7dbb25380006cf
-Author: Dinesh Israni
-Date: Thu Dec 14 18:38:41 2017 -0800
-
-    Add support for leader election when starting up
-
-    Leader election is enabled by default
-    Can specify lock object name and namespace as cmd args
-
-    Issue #10
-
-commit bae23895e95f960cce3ca496e506ebde99aefd4f
-Author: Dinesh Israni
-Date: Thu Dec 14 18:37:54 2017 -0800
-
-    Govendor update
-
-commit 00b128afdb05c21d9e7350fffbfd8d5216c24120
-Author: Dinesh Israni
-Date: Wed Dec 13 19:37:43 2017 -0800
-
-    Add basic integration tests for snapshot
-
-    Issue #8
-
-commit 572c1b1ed4694899e37e4a8ba9a2be072238d86f
-Author: Dinesh Israni
-Date: Wed Dec 13 19:34:39 2017 -0800
-
-    Govendor update
-
-commit 1e50be0d9fc379cb83b901498114eef1bec77c63
-Author: Dinesh Israni
-Date: Wed Dec 6 13:55:43 2017 -0800
-
-    Fix error check when stopping extender server
-
-commit da800c2b6a4e57ea291adf02f51cb7a4e596d1bf
-Author: Dinesh Israni
-Date: Tue Dec 5 17:39:17 2017 -0800
-
-    Add timeout context for shutting down extender
-
-commit 4f4bbd0a2a94a0757486433d4944845be8081392
-Author: Dinesh Israni
-Date: Tue Dec 5 16:58:11 2017 -0800
-
-    Replace Sirupsen with sirupsen in vendored packages
-
-commit f45ce061cd1b58b11f9b88515c69c0c469be9e0a
-Author: Dinesh Israni
-Date: Tue Dec 5 16:46:54 2017 -0800
-
-    Add support for snapshots
-
-    Starts a snapshot controller as well as a provisioner to create PVC from
-    snapshots.
-
-    Based on the snapshot work being done in kubernetes-incubator
-
-    Issue #8
-
-commit 7e326f356bb5310fa2cd28b442148c2e19026c9a
-Author: Dinesh Israni
-Date: Wed Dec 13 19:38:41 2017 -0800
-
-    Update volume driver Init() to take interface parameter
-
-commit d2346ec8c40f86608a5bd81624f6dba445bc781e
-Author: Dinesh Israni
-Date: Thu Dec 14 00:03:13 2017 -0800
-
-    Add VolumeName and ParentID in volumeInfo
-
-commit cd21e73f31bd38fa5f13b5571abc6f1b107188c6
-Author: Dinesh Israni
-Date: Tue Dec 5 16:45:25 2017 -0800
-
-    Implement snapshot plugin for Portworx driver
-
-    Issue #8
-
-commit 1e1bf2b451db651b7b3387187ef4c3da567f2e9a
-Author: Dinesh Israni
-Date: Tue Dec 5 16:31:34 2017 -0800
-
-    Govendor update
-
-commit 7631942690491240ea8f5133c5b819e2570d2e17
-Author: Dinesh Israni
-Date: Thu Dec 14 00:21:01 2017 -0800
-
-    Update stork-deployment to use v0.2
-
-commit b67596ad70a23ad757da952c95fbef04e63447fc
-Author: Dinesh Israni
-Date: Mon Dec 11 21:48:31 2017 -0800
-
-    Update README.md
-
-    Updated for HA scheduler
-
-commit 4be84d4c17321f80aa0e686b7169e86e5a4ab703
-Author: Dinesh Israni
-Date: Mon Dec 11 21:45:58 2017 -0800
-
-    Delete stork-daemonset.yaml
-
-commit 89e4a2133f1ff138c51490b118aeb515b40cf0e0
-Author: Dinesh Israni
-Date: Mon Dec 11 21:45:12 2017 -0800
-
-    Update stork-scheduler.yaml
-
-    Add lock object name for leader election
-
-commit 5ca3a853f435c9ed4e4cad5a6056ce02275355f6 (tag: v0.2)
-Author: Dinesh Israni
-Date: Tue Dec 5 17:24:31 2017 -0800
-
-    Update version to 0.2
-
-commit fd2884ac9b3f91ba5c034ea404ce7c33ecadcb33
-Author: Dinesh Israni
-Date: Tue Nov 28 17:24:31 2017 -0800
-
-    Update ISSUE_TEMPLATE.md
-
-commit a3ca5200d0581b3347021a55d6aa16d3ca1fd649
-Author: Dinesh Israni
-Date: Mon Nov 27 18:58:25 2017 -0800
-
-    Fix typo
-
-commit e2a38f867f2a086bc5fe4343ae6dc6b8e7157228
-Author: Dinesh Israni
-Date: Mon Nov 27 14:20:45 2017 -0800
-
-    Update gitignore
-
-commit a321971f4d3751c90dbcf4841221e881c41a59ba
-Author: Dinesh Israni
-Date: Tue Nov 21 16:53:35 2017 -0800
-
-    Update travis to build integration tests
-
-commit 0448d5944a810d4fe44557a9a7492a35d4bd01cd
-Author: Dinesh Israni
-Date: Tue Nov 21 16:27:45 2017 -0800
-
-    Replace Sirupsen with sirupsen packages
-
-commit 895e879d9762bbf7a26a5bb494217ab53e300d02
-Author: Dinesh Israni
-Date: Tue Nov 21 15:45:14 2017 -0800
-
-    Updates for Portworx driver
-
-    - Fix typo in error struct
-    - Add check for length of volumes received from Inspect()
-
-commit 1c32343e58c596de2df147d197653e44a3d9b95b
-Author: Dinesh Israni
-Date: Mon Nov 20 19:26:09 2017 -0800
-
-    Govendor update
-
-commit 6e2fd3e1f89b6f686498f0bdcf8f11beda01a3bf
-Author: Dinesh Israni
-Date: Mon Nov 20 19:24:20 2017 -0800
-
-    Integration tests for extender and health monitor
-
-    - Using torpedo APIs to start pods
-    - Verifying filter and prioritize behavior
-    - Verifying pods are deleted and relocated if driver fails on a node
-
-    Issue #2
-
-commit a59ddcdd0966c0eb6bc724dd2154351c782d4479
-Author: Dinesh Israni
-Date: Mon Nov 20 17:11:56 2017 -0800
-
-    Update for unit tests
-
-    - Add tags for unit test
-    - Fix noDriverVolumeTest to use a non-Mock volume in pod
-
-commit 2cd6ed48aaee6e6078d2926322bb80c7a53bc3ee
-Author: Dinesh Israni
-Date: Mon Nov 13 18:22:55 2017 -0800
-
-    Add initial monitor to check for health of driver on nodes
-
-    If driver on a node is unhealthy, delete all pods using the driver
-    on that node
-
-    Issue #3
-
-commit dcf1ddbbbc2e830b7d5a8386e539814fb3c9ff37
-Author: Dinesh Israni
-Date: Mon Nov 13 18:19:12 2017 -0800
-
-    Update k8sutils to get all pods and delete a pod
-
-commit 50940c39dbe886e52ac19ebb762b8737c2c0fbbe
-Author: Dinesh Israni
-Date: Mon Nov 13 18:18:30 2017 -0800
-
-    Add Start and Stop for extender
-
-commit 862ba2151c41dd651456bd4c09d9c4621e2912e1
-Author: Dinesh Israni
-Date: Mon Nov 27 17:12:10 2017 -0800
-
-    Update README.md
-
-commit ed14a426b4673c011b05b890ce6fde765f6b17f0
-Author: Dinesh Israni
-Date: Mon Nov 27 17:11:18 2017 -0800
-
-    Update README.md
-
-commit 3bcd6d3b9060f6a9520f92fa9ffd5d498ffcba1f
-Author: Dinesh Israni
-Date: Mon Nov 27 17:09:48 2017 -0800
-
-    Update stork-deployment.yaml
-
-commit 339e586b376806adb504f762a577fbe154b0df34
-Author: Dinesh Israni
-Date: Mon Nov 27 15:10:18 2017 -0800
-
-    Update README.md
-
-commit e636512d0fe569590b8b918887b11754604da84c
-Author: Dinesh Israni
-Date: Mon Nov 27 15:09:49 2017 -0800
-
-    Create CONTRIBUTING.md
-
-commit 926cd3313513d51c7f03c9d2077c7150d8790f59
-Author: Dinesh Israni
-Date: Mon Nov 27 14:57:15 2017 -0800
-
-    Create ISSUE_TEMPLATE.md
-
-    Add issue template based on K8s template
-
-commit 586da254ebc883777d298d017a1d294711d4524d
-Author: Dinesh Israni
-Date: Tue Nov 7 15:20:24 2017 -0800
-
-    Update README.md
-
-    Add some badges
-
-commit 45137f4cb917a18aab93ca55d714ddeb78c9dee3
-Author: Dinesh Israni
-Date: Wed Nov 1 13:18:51 2017 -0700
-
-    Update .travis.yml
-
-commit 01ea5a696805de4ceb363e7c976cf38289daa317
-Author: Dinesh Israni
-Date: Wed Nov 1 12:30:29 2017 -0700
-
-    Update .travis.yml
-
-    Push to latest tag if build and tests pass
-
-commit 294eab2c52179948ad081cc710800b02b67c017c
-Author: Dinesh Israni
-Date: Tue Oct 31 15:51:33 2017 -0700
-
-    Update travis to run unit tests
-
-commit d339c974c0cd63c1689fbb040cae25ffb59d668b
-Author: Dinesh Israni
-Date: Tue Oct 31 15:40:48 2017 -0700
-
-    Add UTs for scheduler extender
-
-commit f092a13879dfca76bdd8a55b461b0b80682dd1f1
-Author: Dinesh Israni
-Date: Tue Oct 31 15:39:55 2017 -0700
-
-    Remove unused method from k8sutils
-
-commit 8310f288d30ae70d0ecd4166ebf366c277913949
-Author: Dinesh Israni
-Date: Tue Oct 31 15:39:07 2017 -0700
-
-    Assign default scores to nodes even if there are no volumes for a driver
-
-commit 772e14a2b0ba4fcccd713d9aaf13ed6b74d3c382
-Author: Dinesh Israni
-Date: Tue Oct 31 15:38:30 2017 -0700
-
-    Add Mock volume driver
-
-commit e24429bf087431d87f4dcedd2a15aa39c83e6992
-Author: Dinesh Israni
-Date: Mon Oct 16 15:56:43 2017 -0700
-
-    Update README.md
-
-commit fdc62dbe2db6e397c30b66cd4d26a8bd3399a4e8
-Author: Dinesh Israni
-Date: Mon Oct 16 15:50:22 2017 -0700
-
-    Update deployment instructions in README
-
-commit 4c9c1afd4365e8a1ac819d43bd39157fcd709ad7
-Author: Dinesh Israni
-Date: Mon Oct 16 14:27:00 2017 -0700
-
-    Update specs
-
-    - Add separate specs for deployment and daemonset
-    - Add example mysql pod spec
-
-commit d66d5457ffc4723f46d900fb4abf468a9d91c0fd
-Author: Dinesh Israni
-Date: Sun Oct 15 23:23:25 2017 -0700
-
-    Update stork spec
-
-    - Fix typo in externder URL in config map
-    - Set hostNetwork to false for stork pod
-
-commit 0e4d120c7af1a85a19dc3e2491722c4b12011da9
-Author: Dinesh Israni
-Date: Sun Oct 15 19:39:05 2017 -0700
-
-    Update stork scheduler pod name in spec
-
-commit 0c4408a187d4fb5e61ff685dae2387a5be2d1d63
-Author: Dinesh Israni
-Date: Sun Oct 15 19:32:24 2017 -0700
-
-    Move spec files
-
-    - Added spec for scheduler which can be used if users don't want to change
-    the config for their default scheduler
-    - Added config map in stork spec which can be used by any scheduler
-
-commit c2047e86796a7bd8d823235275c326977763857f
-Author: Dinesh Israni
-Date: Sat Oct 14 14:16:51 2017 -0700
-
-    Add pretest to travis build
-
-commit 531d0988f3692253a81d73110c4c06fbfe37c69b
-Author: Dinesh Israni
-Date: Sat Oct 14 14:15:44 2017 -0700
-
-    Add error checks
-
-commit 848c9f3d626a7c5bced0e9ca41d83188db6df935
-Author: Dinesh Israni
-Date: Sat Oct 14 13:54:36 2017 -0700
-
-    Update deploy target in Makefile to only push docker image
-
-commit 8b24e2ed06c7e40d748f859505a23ec10e81ff1e
-Author: Dinesh Israni
-Date: Sat Oct 14 13:48:10 2017 -0700
-
-    Update .travis.yml
-
-commit 09dd095a127f7b0bd7984f7a90cd4cdf706fb3e9
-Author: Dinesh Israni
-Date: Fri Oct 13 20:47:34 2017 -0700
-
-    Update .travis.yml
-
-commit 7f795951fae21ce42cc3ec0b204c8780145cc895
-Author: Dinesh Israni
-Date: Fri Oct 13 20:43:15 2017 -0700
-
-    Update .travis.yml
-
-commit d04c3db91dba90da85dd3a0effc34b2bc4611466
-Author: Dinesh Israni
-Date: Fri Oct 13 20:23:13 2017 -0700
-
-    Add travis build
-
-commit 4674dc3ae5c80df99ecc76f08e0567475fc0e75b
-Author: Dinesh Israni
-Date: Fri Oct 13 19:43:23 2017 -0700
-
-    Make the priority scores into constants
-
-commit b2214887a6ec8be9ff192438a50d0892845078e0
-Author: Dinesh Israni
-Date: Fri Oct 13 19:42:52 2017 -0700
-
-    Update logs to print pod information to help with debugging
-
-commit 9d1faa6df04f25689786b36d09371a08a4946218
-Author: Dinesh Israni
-Date: Fri Oct 13 19:06:54 2017 -0700
-
-    Replace image name in pod spec with a tag
-
-commit 98ffffd62f216b001262ecda5911a3641abe5584
-Author: Dinesh Israni
-Date: Fri Oct 13 18:58:11 2017 -0700
-
-    Initial commit for a scheduler extender
-
-    - Serves 'filter' and 'prioritize' requests
-    - Filters out nodes where the driver is not running or unhealthy
-    - Prioritizes nodes where the volume has been allocated
-    - For each volume on a node, the score is bumped by 100
-    - Any node that doesn't have data is given a score of 10 so that it
-    isn't ignored by the scheduler
-
-commit d6118dca35066c915a59e23e71b49574d277f510
-Author: Dinesh Israni
-Date: Tue Oct 3 15:29:05 2017 -0700
-
-    Govendor update
-
-commit e1ef31eedeeacdffa8760aa88fc2bf2c1764a6f5
-Author: Dinesh Israni
-Date: Tue Sep 26 22:31:51 2017 -0700
-
-    Update readme
-
-commit a9a2a9ec1e18f8063b0940bf42cd7d2bd09667d6
-Author: Gou Rao
-Date: Tue Sep 26 11:21:26 2017 -0700
-
-    Update README.md
-
-commit af7dfd032898990740359501bc9ed91becfbe896
-Author: Gou Rao
-Date: Tue Sep 26 17:02:59 2017 +0000
-
-    new logo
-
-commit ff9853640cb11f8c8c374ec21d807fd33003f288
-Author: Gou Rao
-Date: Fri Sep 22 18:01:22 2017 -0700
-
-    Update README.md
-
-commit c896d9dcd56f6456f972e25015713c4b6c821685
-Author: Gou Rao
-Date: Thu Sep 7 18:45:28 2017 +0000
-
-    added a vendor dir
-
-commit b1931f336c02cd89092fce1bb86047d970a3084c
-Author: Gou Rao
-Date: Wed Sep 6 20:24:55 2017 +0000
-
-    Add pod spec example to readme
-
-commit 4021ab76b1cba1c0f66eef14d75614dcf4b5aa15
-Author: Gou Rao
-Date: Wed Sep 6 19:51:20 2017 +0000
-
-    Add pod spec example to readme
-
-commit e7988e32def210ceaa6d9ce8af831a01e3093cdb
-Author: Gou Rao
-Date: Wed Sep 6 19:44:06 2017 +0000
-
-    Add pod spec example to readme
-
-commit 87420d6c0815d80d29f7ac4082f1ae66bee76cb3
-Author: Gou Rao
-Date: Wed Sep 6 06:00:09 2017 +0000
-
-    redo logo
-
-commit eecf1be14776e24ee21eec52bda59f981c84734d
-Author: Gou Rao
-Date: Wed Sep 6 05:47:02 2017 +0000
-
-    redo logo
-
-commit b134fbc6bb06ca291861e11bbb29934b30ce9c0b
-Author: Gou Rao
-Date: Wed Sep 6 05:44:27 2017 +0000
-
-    redo logo
-
-commit 17dffab1dab6e9b2b21fe686c1a7de4a1453d3ee
-Author: Gou Rao
-Date: Wed Sep 6 05:42:00 2017 +0000
2017 +0000 - - redo logo - -commit da0d9394feefac1e44931a095f0f5b4d791a1959 -Author: Gou Rao -Date: Wed Sep 6 05:40:58 2017 +0000 - - redo logo - -commit 96af825a793cc0d0ebcb673b77a8ac6b36353e13 -Author: Gou Rao -Date: Wed Sep 6 05:38:20 2017 +0000 - - redo logo - -commit 083d5c68ff27d00db39ff9111e4b6e7d52ebb320 -Author: Gou Rao -Date: Wed Sep 6 05:37:45 2017 +0000 - - redo logo - -commit 9b93c3b2624cbd2b5a3b0556aafe436fb4ccb8e8 -Author: Gou Rao -Date: Wed Sep 6 05:35:59 2017 +0000 - - redo logo - -commit a1f08c0ca06eb2f0a5d1d18150f7a966a51268c0 -Author: Gou Rao -Date: Wed Sep 6 05:12:09 2017 +0000 - - initial skeleton framework - -commit 6df9f0dec16d433cef9ff28465cfcd1660844cae -Author: Gou Rao -Date: Wed Sep 6 05:11:38 2017 +0000 - - initial skeleton framework - -commit e1f2f1578f1b02aca9dc0770fee9632788ee6344 -Author: venkatpx -Date: Tue Sep 5 21:20:51 2017 -0700 - - Initial commit From 9bdd7f157bde0db5ace08c4d8fbed244ab382283 Mon Sep 17 00:00:00 2001 From: Prashanth Kumar Date: Mon, 7 Nov 2022 09:49:26 +0000 Subject: [PATCH 76/97] Prashanth - Addressing review comments --- .../controllers/applicationbackup.go | 40 +++++++++---------- .../controllers/applicationrestore.go | 28 ++++++------- pkg/utils/utils.go | 4 ++ 3 files changed, 36 insertions(+), 36 deletions(-) diff --git a/pkg/applicationmanager/controllers/applicationbackup.go b/pkg/applicationmanager/controllers/applicationbackup.go index 48b2a5529e..5d2bd1938b 100644 --- a/pkg/applicationmanager/controllers/applicationbackup.go +++ b/pkg/applicationmanager/controllers/applicationbackup.go @@ -215,6 +215,10 @@ func (a *ApplicationBackupController) createBackupLocationPath(backup *stork_api if err != nil { return fmt.Errorf("error getting backup location path: %v", err) } + // For NFS skip creating path + if backupLocation.Location.Type == stork_api.BackupLocationNFS { + return nil + } if err := objectstore.CreateBucket(backupLocation); err != nil { return fmt.Errorf("error creating backup location path: %v", err) } @@ -307,21 +311,13 @@ func (a *ApplicationBackupController) handle(ctx context.Context, backup *stork_ return nil } } - - // Try to create the backupLocation path, just log error if it fails - backupLocation, err := storkops.Instance().GetBackupLocation(backup.Spec.BackupLocation, backup.Namespace) + err := a.createBackupLocationPath(backup) if err != nil { - return fmt.Errorf("error getting backup location path: %v", err) - } - if backupLocation.Location.Type != stork_api.BackupLocationNFS { - err := a.createBackupLocationPath(backup) - if err != nil { - log.ApplicationBackupLog(backup).Errorf(err.Error()) - a.recorder.Event(backup, - v1.EventTypeWarning, - string(stork_api.ApplicationBackupStatusFailed), - err.Error()) - } + log.ApplicationBackupLog(backup).Errorf(err.Error()) + a.recorder.Event(backup, + v1.EventTypeWarning, + string(stork_api.ApplicationBackupStatusFailed), + err.Error()) } // Make sure the rules exist if configured @@ -1166,7 +1162,7 @@ func IsNFSBackuplocationType( ) (bool, error) { backupLocation, err := storkops.Instance().GetBackupLocation(backup.Spec.BackupLocation, backup.Namespace) if err != nil { - return false, fmt.Errorf("error getting backup location path: %v", err) + return false, fmt.Errorf("error getting backup location path for backup [%v/%v]: %v", backup.Namespace, backup.Name, err) } if backupLocation.Location.Type == stork_api.BackupLocationNFS { return true, nil @@ -1187,7 +1183,8 @@ func (a *ApplicationBackupController) backupResources( var resourceTypes []metav1.APIResource 
nfs, err := IsNFSBackuplocationType(backup) if err != nil { - logrus.Errorf("error in checking backuplocation type") + logrus.Errorf("error in checking backuplocation type: %v", err) + return err } // Listing all resource types if len(backup.Spec.ResourceTypes) != 0 { @@ -1341,7 +1338,7 @@ func (a *ApplicationBackupController) backupResources( } if nfs { - // Check whether ResourceExport is preset or not + // Check whether ResourceExport is present or not crName := getResourceExportCRName(utils.PrefixBackup, string(backup.UID), backup.Namespace) resourceExport, err := kdmpShedOps.Instance().GetResourceExport(crName, a.backupAdminNamespace) if err != nil { @@ -1380,8 +1377,8 @@ func (a *ApplicationBackupController) backupResources( // Hardcoding for now. // APIVersion: backupLocation.APIVersion, // Kind: backupLocation.Kind, - APIVersion: "stork.libopenstorage.org/v1alpha1", - Kind: "BackupLocation", + APIVersion: utils.StorkAPIVersion, + Kind: utils.BackupLocationKind, Namespace: backupLocation.Namespace, Name: backupLocation.Name, } @@ -1397,12 +1394,12 @@ func (a *ApplicationBackupController) backupResources( _, err = kdmpShedOps.Instance().CreateResourceExport(resourceExport) if err != nil { - logrus.Errorf("failed to create DataExport CR: %v", err) + logrus.Errorf("failed to create ResourceExport CR[%v/%v]: %v", resourceExport.Namespace, resourceExport.Name, err) return err } return nil } - logrus.Errorf("failed to get backup resourceExport CR: %v", err) + logrus.Errorf("failed to get backup resourceExport CR[%v/%v]: %v", resourceExport.Namespace, resourceExport.Name, err) // Will retry in the next cycle of reconciler. return nil } else { @@ -1415,6 +1412,7 @@ func (a *ApplicationBackupController) backupResources( backup.Status.Stage = stork_api.ApplicationBackupStageFinal backup.Status.Reason = message backup.Status.LastUpdateTimestamp = metav1.Now() + backup.Status.FinishTimestamp = metav1.Now() err = a.client.Update(context.TODO(), backup) if err != nil { return err diff --git a/pkg/applicationmanager/controllers/applicationrestore.go b/pkg/applicationmanager/controllers/applicationrestore.go index 831948ef80..9a84cdb885 100644 --- a/pkg/applicationmanager/controllers/applicationrestore.go +++ b/pkg/applicationmanager/controllers/applicationrestore.go @@ -549,6 +549,7 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat nfs, err := IsNFSBackuplocationType(backup) if err != nil { logrus.Errorf("error in checking backuplocation type") + return err } if len(restore.Status.Volumes) != pvcCount { // Here backupVolumeInfoMappings is framed based on driver name mapping, hence startRestore() @@ -564,7 +565,6 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat // s3 + EBS/GKE/Azure = legacy code path if !nfs || (nfs && driverName != volume.KDMPDriverName) { existingRestoreVolInfos := make([]*storkapi.ApplicationRestoreVolumeInfo, 0) - //driver, err := volume.Get(driverName) if err != nil { return err } @@ -640,7 +640,7 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat return err } } - // Check whether ResourceExport is preset or not + // Check whether ResourceExport is present or not if nfs && driverName == volume.KDMPDriverName { err = a.client.Update(context.TODO(), restore) if err != nil { @@ -685,10 +685,8 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat destination := &kdmpapi.ResourceExportObjectReference{ // TODO: GetBackupLocation is not returning 
APIVersion and kind. // Hardcoding for now. - // APIVersion: backupLocation.APIVersion, - // Kind: backupLocation.Kind, - APIVersion: "stork.libopenstorage.org/v1alpha1", - Kind: "BackupLocation", + APIVersion: utils.StorkAPIVersion, + Kind: utils.BackupLocationKind, Namespace: backupLocation.Namespace, Name: backupLocation.Name, } @@ -839,7 +837,6 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat restore.Status.FinishTimestamp = metav1.Now() restore.Status.Status = storkapi.ApplicationRestoreStatusFailed restore.Status.Reason = vInfo.Reason - break } else if vInfo.Status == storkapi.ApplicationRestoreStatusSuccessful { a.recorder.Event(restore, @@ -1462,7 +1459,7 @@ func (a *ApplicationRestoreController) restoreResources( } nfs, err := IsNFSBackuplocationType(backup) if err != nil { - logrus.Errorf("error in checking backuplocation type") + logrus.Errorf("error in checking backuplocation type: %v", err) return err } @@ -1510,15 +1507,15 @@ func (a *ApplicationRestoreController) restoreResources( } backupLocation, err := storkops.Instance().GetBackupLocation(backup.Spec.BackupLocation, backup.Namespace) if err != nil { - return fmt.Errorf("error getting backup location path: %v", err) + return fmt.Errorf("error getting backup location path %v: %v", backup.Spec.BackupLocation, err) } destination := &kdmpapi.ResourceExportObjectReference{ // TODO: .GetBackupLocation is not returning APIVersion and kind. // Hardcoding for now. // APIVersion: backupLocation.APIVersion, // Kind: backupLocation.Kind, - APIVersion: "stork.libopenstorage.org/v1alpha1", - Kind: "BackupLocation", + APIVersion: utils.StorkAPIVersion, + Kind: utils.BackupLocationKind, Namespace: backupLocation.Namespace, Name: backupLocation.Name, } @@ -1526,7 +1523,7 @@ func (a *ApplicationRestoreController) restoreResources( resourceExport.Spec.Destination = *destination _, err = kdmpShedOps.Instance().CreateResourceExport(resourceExport) if err != nil { - logrus.Errorf("failed to create DataExport CR: %v", err) + logrus.Errorf("failed to create ResourceExport CR[%v/%v]: %v", resourceExport.Namespace, resourceExport.Name, err) return err } return nil @@ -1545,6 +1542,7 @@ func (a *ApplicationRestoreController) restoreResources( restore.Status.Stage = storkapi.ApplicationRestoreStageFinal restore.Status.Reason = message restore.Status.LastUpdateTimestamp = metav1.Now() + restore.Status.FinishTimestamp = metav1.Now() err = a.client.Update(context.TODO(), restore) if err != nil { return err @@ -1568,7 +1566,6 @@ func (a *ApplicationRestoreController) restoreResources( restore.Status.FinishTimestamp = metav1.Now() restore.Status.Status = storkapi.ApplicationRestoreStatusSuccessful restore.Status.Reason = "Volumes and resources were restored up successfully" - case kdmpapi.ResourceExportStatusInitial: doCleanup = false case kdmpapi.ResourceExportStatusPending: @@ -1577,6 +1574,7 @@ func (a *ApplicationRestoreController) restoreResources( restore.Status.LastUpdateTimestamp = metav1.Now() doCleanup = false } + restore.Status.LastUpdateTimestamp = metav1.Now() err = a.client.Update(context.TODO(), restore) if err != nil { return err @@ -1741,14 +1739,14 @@ func (a *ApplicationRestoreController) cleanupResources(restore *storkapi.Applic crName := getResourceExportCRName(utils.PrefixRestore, string(restore.UID), restore.Namespace) err := kdmpShedOps.Instance().DeleteResourceExport(crName, a.restoreAdminNamespace) if err != nil && !k8s_errors.IsNotFound(err) { - errMsg := fmt.Sprintf("failed to delete resource 
export CR [%v]: %v", crName, err) + errMsg := fmt.Sprintf("failed to delete restore resource export CR [%v]: %v", crName, err) log.ApplicationRestoreLog(restore).Errorf("%v", errMsg) return err } crName = getResourceExportCRName(utils.PrefixNFSRestorePVC, string(restore.UID), restore.Namespace) err = kdmpShedOps.Instance().DeleteResourceExport(crName, restore.Namespace) if err != nil && !k8s_errors.IsNotFound(err) { - errMsg := fmt.Sprintf("failed to delete resource export CR [%v]: %v", crName, err) + errMsg := fmt.Sprintf("failed to delete pvc creation resource export CR [%v]: %v", crName, err) log.ApplicationRestoreLog(restore).Errorf("%v", errMsg) return err } diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index c10603f6a8..a2be450aee 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -61,6 +61,10 @@ const ( PxbackupObjectNameKey = PxbackupAnnotationPrefix + "backup-name" // SkipResourceAnnotation - annotation value to skip resource during resource collector SkipResourceAnnotation = "stork.libopenstorage.org/skip-resource" + // StorkAPIVersion API version + StorkAPIVersion = "stork.libopenstorage.org/v1alpha1" + // BackupLocationKind CR kind + BackupLocationKind = "BackupLocation" ) // ParseKeyValueList parses a list of key=values string into a map From 0e12732ff450c6d91b2051251ff065fdf24a6352 Mon Sep 17 00:00:00 2001 From: sivakumar subraani Date: Mon, 7 Nov 2022 16:18:26 +0000 Subject: [PATCH 77/97] travis file change for 2.12-nfs branch --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 880adafb8d..1f1673ebcb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,7 +12,7 @@ cache: - $HOME/.cache/go-build script: - | - if [ "${TRAVIS_BRANCH}" == "2.12" ]; then + if [ "${TRAVIS_BRANCH}" == "2.12-nfs" ]; then export DOCKER_HUB_STORK_TAG="${TRAVIS_BRANCH}"-dev export DOCKER_HUB_STORK_TEST_TAG="${TRAVIS_BRANCH}"-dev export DOCKER_HUB_CMD_EXECUTOR_TAG="${TRAVIS_BRANCH}"-dev From 899660f7c2650eb27c8789046b19bc3c87ad6329 Mon Sep 17 00:00:00 2001 From: sivakumar subraani Date: Mon, 7 Nov 2022 18:29:37 +0000 Subject: [PATCH 78/97] vendor latest kdmp branch from 1.2.3 branch --- go.mod | 2 +- go.sum | 2 ++ vendor/github.com/portworx/kdmp/pkg/version/version.go | 2 +- vendor/modules.txt | 2 +- 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index d0b18e694d..784e958213 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/openshift/api v0.0.0-20210105115604-44119421ec6b github.com/openshift/client-go v0.0.0-20210112165513-ebc401615f47 github.com/pborman/uuid v1.2.0 - github.com/portworx/kdmp v0.4.1-0.20221106080928-1fd01d5e6ed0 + github.com/portworx/kdmp v0.4.1-0.20221107154253-815cc97f6e1b github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987 github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 github.com/portworx/torpedo v0.20.4-rc1.0.20210325154352-eb81b0cdd145 diff --git a/go.sum b/go.sum index 00c8724e25..8272df6c3b 100644 --- a/go.sum +++ b/go.sum @@ -1433,6 +1433,8 @@ github.com/portworx/kdmp v0.4.1-0.20220902105026-dc14791e1508/go.mod h1:sTO9LkPk github.com/portworx/kdmp v0.4.1-0.20220905153748-e0bb69e59f38/go.mod h1:NI2UgLITtggRvvaRA7lE4+Np4htDp+06Jf1LWksKyGA= github.com/portworx/kdmp v0.4.1-0.20221106080928-1fd01d5e6ed0 h1:orOtVtS8VcmKiorxN0E83QrTpUFiCQ5OMVOJaqhivOk= github.com/portworx/kdmp v0.4.1-0.20221106080928-1fd01d5e6ed0/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= +github.com/portworx/kdmp 
v0.4.1-0.20221107154253-815cc97f6e1b h1:b2IXNcmXuTyacLsTcpc1dwL2thN0zyQEtZM7uLsjjlk= +github.com/portworx/kdmp v0.4.1-0.20221107154253-815cc97f6e1b/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= github.com/portworx/kvdb v0.0.0-20190105022415-cccaa09abfc9/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200723230726-2734b7f40194/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200929023115-b312c7519467 h1:jkqzdbOnejgSN5HG/FLt4enNrozWT/K+nlmaRm3P1II= diff --git a/vendor/github.com/portworx/kdmp/pkg/version/version.go b/vendor/github.com/portworx/kdmp/pkg/version/version.go index 11242843d1..0d0e7bc0c2 100644 --- a/vendor/github.com/portworx/kdmp/pkg/version/version.go +++ b/vendor/github.com/portworx/kdmp/pkg/version/version.go @@ -24,7 +24,7 @@ const ( // // These variables typically come from -ldflags settings. var ( - gitVersion = "master" + gitVersion = "1.2.3" gitCommit = "" // sha1 from git, output of $(git rev-parse HEAD) buildDate = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') kbVerRegex = regexp.MustCompile(`^(v\d+\.\d+\.\d+)(.*)`) diff --git a/vendor/modules.txt b/vendor/modules.txt index 7ed68bddcd..d3a67cddce 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -691,7 +691,7 @@ github.com/pierrec/lz4/internal/xxh32 github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 github.com/pmezard/go-difflib/difflib -# github.com/portworx/kdmp v0.4.1-0.20221106080928-1fd01d5e6ed0 +# github.com/portworx/kdmp v0.4.1-0.20221107154253-815cc97f6e1b ## explicit github.com/portworx/kdmp/pkg/apis/kdmp github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1 From ed82856cc704e7feadeb78de0b18507bfb3f26cb Mon Sep 17 00:00:00 2001 From: diptiranjanpx Date: Tue, 8 Nov 2022 11:07:14 +0000 Subject: [PATCH 79/97] Vendoring 1.2.3 kdmp latest. 
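The functional change this vendoring pulls in is a defensive initialization in kdmp's DataExport reconciler: when the backup location is not NFS, an empty NfsConfig is populated before the transfer job is started, so downstream code can read its fields without a nil-pointer dereference. A minimal sketch of the pattern, using simplified stand-in types rather than the real stork v1alpha1 structs (the NfsConfig fields here are illustrative only):

package main

import "fmt"

// Stand-in types; the real ones live on stork's v1alpha1 BackupLocation.
type NfsConfig struct {
	ServerAddr string
	SubPath    string
}

type Location struct {
	Type      string
	NfsConfig *NfsConfig
}

const BackupLocationNFS = "nfs"

// ensureNfsConfig mirrors the vendored guard: non-NFS locations get an
// empty, non-nil NfsConfig so that later field reads cannot panic.
func ensureNfsConfig(loc *Location) {
	if loc.Type != BackupLocationNFS {
		loc.NfsConfig = &NfsConfig{}
	}
}

func main() {
	s3 := &Location{Type: "s3"}
	ensureNfsConfig(s3)
	fmt.Printf("safe to read: %q\n", s3.NfsConfig.SubPath) // no nil dereference
}

The vendored code applies the same guard inline in sync(), as the reconcile.go hunk below shows.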
--- go.mod | 2 +- go.sum | 2 ++ .../portworx/kdmp/pkg/controllers/dataexport/reconcile.go | 3 +++ vendor/modules.txt | 2 +- 4 files changed, 7 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 784e958213..344b75634f 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/openshift/api v0.0.0-20210105115604-44119421ec6b github.com/openshift/client-go v0.0.0-20210112165513-ebc401615f47 github.com/pborman/uuid v1.2.0 - github.com/portworx/kdmp v0.4.1-0.20221107154253-815cc97f6e1b + github.com/portworx/kdmp v0.4.1-0.20221108105735-bf0aea42dfe7 github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987 github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 github.com/portworx/torpedo v0.20.4-rc1.0.20210325154352-eb81b0cdd145 diff --git a/go.sum b/go.sum index 8272df6c3b..dcc7f6fa61 100644 --- a/go.sum +++ b/go.sum @@ -1435,6 +1435,8 @@ github.com/portworx/kdmp v0.4.1-0.20221106080928-1fd01d5e6ed0 h1:orOtVtS8VcmKior github.com/portworx/kdmp v0.4.1-0.20221106080928-1fd01d5e6ed0/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= github.com/portworx/kdmp v0.4.1-0.20221107154253-815cc97f6e1b h1:b2IXNcmXuTyacLsTcpc1dwL2thN0zyQEtZM7uLsjjlk= github.com/portworx/kdmp v0.4.1-0.20221107154253-815cc97f6e1b/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= +github.com/portworx/kdmp v0.4.1-0.20221108105735-bf0aea42dfe7 h1:P4tgSnnVK4R7nd50irFb2fbshYN5pLjv6IMyl/4M76o= +github.com/portworx/kdmp v0.4.1-0.20221108105735-bf0aea42dfe7/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= github.com/portworx/kvdb v0.0.0-20190105022415-cccaa09abfc9/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200723230726-2734b7f40194/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200929023115-b312c7519467 h1:jkqzdbOnejgSN5HG/FLt4enNrozWT/K+nlmaRm3P1II= diff --git a/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go b/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go index 9e5fb09233..e523f385e1 100644 --- a/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go +++ b/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go @@ -293,6 +293,9 @@ func (c *Controller) sync(ctx context.Context, in *kdmpapi.DataExport) (bool, er return false, c.updateStatus(dataExport, data) } + if backupLocation.Location.Type != storkapi.BackupLocationNFS { + backupLocation.Location.NfsConfig = &storkapi.NfsConfig{} + } // start data transfer id, err := startTransferJob( driver, diff --git a/vendor/modules.txt b/vendor/modules.txt index d3a67cddce..b609d46f41 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -691,7 +691,7 @@ github.com/pierrec/lz4/internal/xxh32 github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 github.com/pmezard/go-difflib/difflib -# github.com/portworx/kdmp v0.4.1-0.20221107154253-815cc97f6e1b +# github.com/portworx/kdmp v0.4.1-0.20221108105735-bf0aea42dfe7 ## explicit github.com/portworx/kdmp/pkg/apis/kdmp github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1 From 9779f9a37ba8951bd777adfa7d66f31668415523 Mon Sep 17 00:00:00 2001 From: sivakumar subraani Date: Tue, 8 Nov 2022 17:59:54 +0000 Subject: [PATCH 80/97] vendor kdmp repo from 1.2.3 branch --- go.mod | 2 +- go.sum | 2 ++ vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go | 5 +++++ vendor/modules.txt | 2 +- 4 files changed, 9 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 344b75634f..e610bf277d 100644 --- 
a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/openshift/api v0.0.0-20210105115604-44119421ec6b github.com/openshift/client-go v0.0.0-20210112165513-ebc401615f47 github.com/pborman/uuid v1.2.0 - github.com/portworx/kdmp v0.4.1-0.20221108105735-bf0aea42dfe7 + github.com/portworx/kdmp v0.4.1-0.20221108170154-53a356d7abea github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987 github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 github.com/portworx/torpedo v0.20.4-rc1.0.20210325154352-eb81b0cdd145 diff --git a/go.sum b/go.sum index dcc7f6fa61..82d8284d22 100644 --- a/go.sum +++ b/go.sum @@ -1437,6 +1437,8 @@ github.com/portworx/kdmp v0.4.1-0.20221107154253-815cc97f6e1b h1:b2IXNcmXuTyacLs github.com/portworx/kdmp v0.4.1-0.20221107154253-815cc97f6e1b/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= github.com/portworx/kdmp v0.4.1-0.20221108105735-bf0aea42dfe7 h1:P4tgSnnVK4R7nd50irFb2fbshYN5pLjv6IMyl/4M76o= github.com/portworx/kdmp v0.4.1-0.20221108105735-bf0aea42dfe7/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= +github.com/portworx/kdmp v0.4.1-0.20221108170154-53a356d7abea h1:1tg0uqjrm2KqTjyY5bDqpzjHZUM7TaSjSiSUxzgIKto= +github.com/portworx/kdmp v0.4.1-0.20221108170154-53a356d7abea/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= github.com/portworx/kvdb v0.0.0-20190105022415-cccaa09abfc9/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200723230726-2734b7f40194/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200929023115-b312c7519467 h1:jkqzdbOnejgSN5HG/FLt4enNrozWT/K+nlmaRm3P1II= diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go index a0c856811a..4939606379 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go @@ -679,6 +679,8 @@ func CreateNfsPv(pvName string, }, }, Spec: corev1.PersistentVolumeSpec{ + // Setting it to an empty string so that the default storage class will not be selected. + StorageClassName: "", AccessModes: []corev1.PersistentVolumeAccessMode{ "ReadWriteMany", }, @@ -714,6 +716,7 @@ func CreateNfsPv(pvName string, // CreateNfsPvc - Create a persistent volume claim for NFS specific jobs func CreateNfsPvc(pvcName string, pvName string, namespace string) error { fn := "CreateNfsPvc" + emptyStorageClass := "" pvc := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: pvcName, @@ -723,6 +726,8 @@ func CreateNfsPvc(pvcName string, pvName string, namespace string) error { Spec: corev1.PersistentVolumeClaimSpec{ + // Setting it to an empty string so that the default storage class will not be selected.
+ StorageClassName: &emptyStorageClass, AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ diff --git a/vendor/modules.txt b/vendor/modules.txt index b609d46f41..760beec1a2 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -691,7 +691,7 @@ github.com/pierrec/lz4/internal/xxh32 github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 github.com/pmezard/go-difflib/difflib -# github.com/portworx/kdmp v0.4.1-0.20221108105735-bf0aea42dfe7 +# github.com/portworx/kdmp v0.4.1-0.20221108170154-53a356d7abea ## explicit github.com/portworx/kdmp/pkg/apis/kdmp github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1 From 4f396e0ea0d95c7b8c36d41aa0fa33fd77a77bc3 Mon Sep 17 00:00:00 2001 From: sivakumar subraani Date: Wed, 9 Nov 2022 15:27:15 +0000 Subject: [PATCH 81/97] vendor latest kdmp from 1.2.3 branch --- go.mod | 2 +- go.sum | 2 ++ .../portworx/kdmp/pkg/drivers/nfsbackup/nfsbackup.go | 2 +- .../portworx/kdmp/pkg/drivers/nfsrestore/nfsrestore.go | 2 +- .../github.com/portworx/kdmp/pkg/drivers/utils/utils.go | 8 +++++++- vendor/modules.txt | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index e610bf277d..a61f47991d 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/openshift/api v0.0.0-20210105115604-44119421ec6b github.com/openshift/client-go v0.0.0-20210112165513-ebc401615f47 github.com/pborman/uuid v1.2.0 - github.com/portworx/kdmp v0.4.1-0.20221108170154-53a356d7abea + github.com/portworx/kdmp v0.4.1-0.20221109152346-d4c8431982fc github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987 github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 github.com/portworx/torpedo v0.20.4-rc1.0.20210325154352-eb81b0cdd145 diff --git a/go.sum b/go.sum index 82d8284d22..df1cca0dda 100644 --- a/go.sum +++ b/go.sum @@ -1439,6 +1439,8 @@ github.com/portworx/kdmp v0.4.1-0.20221108105735-bf0aea42dfe7 h1:P4tgSnnVK4R7nd5 github.com/portworx/kdmp v0.4.1-0.20221108105735-bf0aea42dfe7/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= github.com/portworx/kdmp v0.4.1-0.20221108170154-53a356d7abea h1:1tg0uqjrm2KqTjyY5bDqpzjHZUM7TaSjSiSUxzgIKto= github.com/portworx/kdmp v0.4.1-0.20221108170154-53a356d7abea/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= +github.com/portworx/kdmp v0.4.1-0.20221109152346-d4c8431982fc h1:RSt7AXX5gMmV1h9OOUhwDoUbBxUfqbOJk79NqOQB3Qw= +github.com/portworx/kdmp v0.4.1-0.20221109152346-d4c8431982fc/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= github.com/portworx/kvdb v0.0.0-20190105022415-cccaa09abfc9/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200723230726-2734b7f40194/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200929023115-b312c7519467 h1:jkqzdbOnejgSN5HG/FLt4enNrozWT/K+nlmaRm3P1II= diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/nfsbackup/nfsbackup.go b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsbackup/nfsbackup.go index 6322a24348..6b954ed6e1 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/nfsbackup/nfsbackup.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsbackup/nfsbackup.go @@ -191,7 +191,7 @@ func jobForBackupResource( nfsExecutorImage, _, err := utils.GetExecutorImageAndSecret(drivers.NfsExecutorImage, jobOption.NfsImageExecutorSource, jobOption.NfsImageExecutorSourceNs, - jobOption.JobName, + jobOption.RestoreExportName, jobOption) if err != nil { diff --git
a/vendor/github.com/portworx/kdmp/pkg/drivers/nfsrestore/nfsrestore.go b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsrestore/nfsrestore.go index 51908c3092..be5c95a6e9 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/nfsrestore/nfsrestore.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsrestore/nfsrestore.go @@ -211,7 +211,7 @@ func jobForRestoreResource( nfsExecutorImage, _, err := utils.GetExecutorImageAndSecret(drivers.NfsExecutorImage, jobOption.NfsImageExecutorSource, jobOption.NfsImageExecutorSourceNs, - jobOption.JobName, + jobOption.RestoreExportName, jobOption) if err != nil { logrus.Errorf("failed to get the executor image details") diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go index 4939606379..b2ff03f585 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go @@ -352,7 +352,13 @@ func GetExecutorImageAndSecret(executorImageType, deploymentName, deploymentNs, } } if len(imageRegistrySecret) != 0 { - err = CreateImageRegistrySecret(imageRegistrySecret, jobName, jobOption.KopiaImageExecutorSourceNs, jobOption.Namespace) + var secretSourceNs string + if executorImageType == drivers.NfsExecutorImage { + secretSourceNs = jobOption.NfsImageExecutorSourceNs + } else { + secretSourceNs = jobOption.KopiaImageExecutorSourceNs + } + err = CreateImageRegistrySecret(imageRegistrySecret, jobName, secretSourceNs, jobOption.Namespace) if err != nil { return "", "", err } diff --git a/vendor/modules.txt b/vendor/modules.txt index 760beec1a2..8fda0e9769 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -691,7 +691,7 @@ github.com/pierrec/lz4/internal/xxh32 github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 github.com/pmezard/go-difflib/difflib -# github.com/portworx/kdmp v0.4.1-0.20221108170154-53a356d7abea +# github.com/portworx/kdmp v0.4.1-0.20221109152346-d4c8431982fc ## explicit github.com/portworx/kdmp/pkg/apis/kdmp github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1 From aba5b9fa31f2510014b5e245049479b63e527ede Mon Sep 17 00:00:00 2001 From: Kesavan Thiruvenkadasamy Date: Thu, 10 Nov 2022 11:03:04 +0530 Subject: [PATCH 82/97] vendor in kdmp 1.2.3 branch Signed-off-by: Kesavan Thiruvenkadasamy --- go.mod | 4 +- go.sum | 82 +------------------ .../pkg/drivers/kopiabackup/kopiabackup.go | 7 ++ .../drivers/kopiabackup/kopiabackuplive.go | 7 ++ .../pkg/drivers/kopiadelete/kopiadelete.go | 18 +++- .../kopiamaintenance/kopiamaintenance.go | 18 ++++ .../pkg/drivers/kopiarestore/kopiarestore.go | 7 ++ .../kdmp/pkg/drivers/nfsbackup/nfsbackup.go | 7 ++ .../kdmp/pkg/drivers/nfsdelete/nfsdelete.go | 22 ++++- .../kdmp/pkg/drivers/nfsrestore/nfsrestore.go | 7 ++ .../portworx/kdmp/pkg/drivers/utils/utils.go | 24 +++++- vendor/modules.txt | 5 +- 12 files changed, 118 insertions(+), 90 deletions(-) diff --git a/go.mod b/go.mod index a61f47991d..b132babb31 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/openshift/api v0.0.0-20210105115604-44119421ec6b github.com/openshift/client-go v0.0.0-20210112165513-ebc401615f47 github.com/pborman/uuid v1.2.0 - github.com/portworx/kdmp v0.4.1-0.20221109152346-d4c8431982fc + github.com/portworx/kdmp v0.4.1-0.20221110051828-bdbb3afc4dd0 github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987 github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 github.com/portworx/torpedo v0.20.4-rc1.0.20210325154352-eb81b0cdd145 
@@ -43,9 +43,7 @@ require ( golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a google.golang.org/api v0.30.0 google.golang.org/grpc v1.48.0 - google.golang.org/grpc/examples v0.0.0-20220715232852-f601dfac73c9 // indirect gopkg.in/yaml.v2 v2.4.0 - gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.24.0 k8s.io/apiextensions-apiserver v0.21.5 k8s.io/apimachinery v0.24.3 diff --git a/go.sum b/go.sum index df1cca0dda..495fb0f601 100644 --- a/go.sum +++ b/go.sum @@ -266,6 +266,7 @@ github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZw github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/banzaicloud/k8s-objectmatcher v1.5.1/go.mod h1:9MWY5HsM/OaTmoTirczhlO8UALbH722WgdpaaR7Y8OE= github.com/bazelbuild/buildtools v0.0.0-20190917191645-69366ca98f89/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -319,7 +320,6 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= @@ -402,7 +402,6 @@ github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pq github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= @@ -1111,8 +1110,6 @@ github.com/libopenstorage/cloudops v0.0.0-20200604165016-9cc0977d745e/go.mod h1: github.com/libopenstorage/cloudops v0.0.0-20220420143942-8bdd341e5b41/go.mod h1:zigCEUGrJZbK/1FN6+SHMuMjS6vjeSKxuo0G4Ars4Cg= github.com/libopenstorage/external-storage v0.20.4-openstorage-rc10 h1:q21CLGSi9DhNBBuJuitquA/T6FwLV3KNZxaJpxQbOLc= github.com/libopenstorage/external-storage v0.20.4-openstorage-rc10/go.mod h1:nffpoeodwwp+wwngmBGbLBCd7TZ9GxHLtxKoaLRW6K4= -github.com/libopenstorage/external-storage v0.20.4-openstorage-rc7 h1:mHp7bfGyHwG4P8dhHEMJ775KLmcjv3tcA2Uc+5nGpXg= -github.com/libopenstorage/external-storage v0.20.4-openstorage-rc7/go.mod 
h1:nffpoeodwwp+wwngmBGbLBCd7TZ9GxHLtxKoaLRW6K4= github.com/libopenstorage/gossip v0.0.0-20190507031959-c26073a01952/go.mod h1:TjXt2Iz2bTkpfc4Q6xN0ttiNipTVwEEYoZSMZHlfPek= github.com/libopenstorage/gossip v0.0.0-20200808224301-d5287c7c8b24/go.mod h1:TjXt2Iz2bTkpfc4Q6xN0ttiNipTVwEEYoZSMZHlfPek= github.com/libopenstorage/gossip v0.0.0-20220309192431-44c895e0923e h1:4l9N2Sw8VGGUqe50yC2BnTFMRJuHJGpIGZcCUZ2S6gg= @@ -1321,7 +1318,6 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= -github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= @@ -1337,7 +1333,6 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= -github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= @@ -1354,7 +1349,6 @@ github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= -github.com/opencontainers/runc v1.0.0-rc95/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM= github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= @@ -1427,20 +1421,11 @@ github.com/portworx/kdmp v0.4.1-0.20211103043446-cc5455f203d0/go.mod h1:BZ9ApnLF github.com/portworx/kdmp v0.4.1-0.20211108115338-ba2bebf06ffb/go.mod h1:cbaFBCLFTtF0taXtGR2zGD89k0gl7fNl+n4Vi9p4gmI= github.com/portworx/kdmp v0.4.1-0.20220309093511-f7b925b9e53e/go.mod h1:RAXbeaO/JmwQPRJCDdOoY/UsmGPY/awWsL4FbDOqAVk= github.com/portworx/kdmp v0.4.1-0.20220414053457-962507678379/go.mod h1:EAVroITfYd50a0vi/ScAILl6h5RYJteuO/pg1y3vNNw= -github.com/portworx/kdmp v0.4.1-0.20220710173715-5d42efc7d149 h1:KaRMV5hWbl7raiTFo20AZaXSIBBKCadzBmrXfwU+Id0= github.com/portworx/kdmp v0.4.1-0.20220710173715-5d42efc7d149/go.mod h1:nb5AupP/63ByyqAYfZ+E32LDEnP0PjgH6w+yKXxWIgE= github.com/portworx/kdmp v0.4.1-0.20220902105026-dc14791e1508/go.mod h1:sTO9LkPkExEVE6BqowIzkrQsyBtGdaC4Vh1AcKQ4xZA= github.com/portworx/kdmp v0.4.1-0.20220905153748-e0bb69e59f38/go.mod h1:NI2UgLITtggRvvaRA7lE4+Np4htDp+06Jf1LWksKyGA= -github.com/portworx/kdmp 
v0.4.1-0.20221106080928-1fd01d5e6ed0 h1:orOtVtS8VcmKiorxN0E83QrTpUFiCQ5OMVOJaqhivOk= -github.com/portworx/kdmp v0.4.1-0.20221106080928-1fd01d5e6ed0/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= -github.com/portworx/kdmp v0.4.1-0.20221107154253-815cc97f6e1b h1:b2IXNcmXuTyacLsTcpc1dwL2thN0zyQEtZM7uLsjjlk= -github.com/portworx/kdmp v0.4.1-0.20221107154253-815cc97f6e1b/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= -github.com/portworx/kdmp v0.4.1-0.20221108105735-bf0aea42dfe7 h1:P4tgSnnVK4R7nd50irFb2fbshYN5pLjv6IMyl/4M76o= -github.com/portworx/kdmp v0.4.1-0.20221108105735-bf0aea42dfe7/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= -github.com/portworx/kdmp v0.4.1-0.20221108170154-53a356d7abea h1:1tg0uqjrm2KqTjyY5bDqpzjHZUM7TaSjSiSUxzgIKto= -github.com/portworx/kdmp v0.4.1-0.20221108170154-53a356d7abea/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= -github.com/portworx/kdmp v0.4.1-0.20221109152346-d4c8431982fc h1:RSt7AXX5gMmV1h9OOUhwDoUbBxUfqbOJk79NqOQB3Qw= -github.com/portworx/kdmp v0.4.1-0.20221109152346-d4c8431982fc/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= +github.com/portworx/kdmp v0.4.1-0.20221110051828-bdbb3afc4dd0 h1:rIpCh+5iQCIQgpjUWsc1ToWXeX+NbtCiZm2leyPx5s4= +github.com/portworx/kdmp v0.4.1-0.20221110051828-bdbb3afc4dd0/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= github.com/portworx/kvdb v0.0.0-20190105022415-cccaa09abfc9/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200723230726-2734b7f40194/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200929023115-b312c7519467 h1:jkqzdbOnejgSN5HG/FLt4enNrozWT/K+nlmaRm3P1II= @@ -1449,12 +1434,6 @@ github.com/portworx/px-backup-api v1.2.2-0.20210917042806-f2b0725444af/go.mod h1 github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987 h1:VNBTmIPjJRZ2QP64zdsrif3ELDHiMzoyNNX74VNHgZ8= github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987/go.mod h1:g3pw2lI2AjqAixUCRhaBdKTY98znsCPR7NGRrlpimVU= github.com/portworx/pxc v0.33.0/go.mod h1:Tl7hf4K2CDr0XtxzM08sr9H/KsMhscjf9ydb+MnT0U4= -github.com/portworx/sched-ops v1.20.4-rc1.0.20220714042759-8f183fe386ca h1:jrjwiQdqgDRsQZuiRDaWsbvx/z5t1icQPf7dgJOQUKE= -github.com/portworx/sched-ops v1.20.4-rc1.0.20220714042759-8f183fe386ca/go.mod h1:0IQvado0rnmbRMORaCqCDrrzjBrX5sU+Sz2+vQwEsjM= -github.com/portworx/sched-ops v1.20.4-rc1.0.20220824221759-f21d3c3b4496 h1:4VuOzgXy6EU6zrVTEP4wlAaBUwdGA2jY1ckyjthTvb8= -github.com/portworx/sched-ops v1.20.4-rc1.0.20220824221759-f21d3c3b4496/go.mod h1:/xDBMzUV30kbdQYaPdAFcAYqEada6ZnWi4zt4KzFzAI= -github.com/portworx/sched-ops v1.20.4-rc1.0.20220922150029-c1d35df2436a h1:qzoPM67cqkX6qJKzd1Wmbt9hZkY5kFYlqnbZMfG8qU0= -github.com/portworx/sched-ops v1.20.4-rc1.0.20220922150029-c1d35df2436a/go.mod h1:/xDBMzUV30kbdQYaPdAFcAYqEada6ZnWi4zt4KzFzAI= github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 h1:fPdQkWEXZt+kE4o/wm6KlhwhYNDhJJpoRakcI4LcE48= github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8/go.mod h1:8XBwmcbDuhW0TWFKCaHH4oS5xsfGFU5miSyqb0fvl3U= github.com/portworx/talisman v0.0.0-20210302012732-8af4564777f7/go.mod h1:e8a6uFpSbOlRpZQlW9aXYogC+GWAo065G0RL9hDkD4Q= @@ -1761,8 +1740,7 @@ go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.15.0/go.mod 
h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.18.1 h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= gocloud.dev v0.20.0 h1:mbEKMfnyPV7W1Rj35R1xXfjszs9dXkwSOq2KoFr25g8= gocloud.dev v0.20.0/go.mod h1:+Y/RpSXrJthIOM8uFNzWp6MRu9pFPNFEEZrQMxpkfIc= @@ -2076,7 +2054,6 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2296,7 +2273,6 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= @@ -2373,7 +2349,6 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20190709130402-674ba3eaed22/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -2382,8 +2357,6 @@ gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81 gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -helm.sh/helm/v3 v3.6.0 h1:/9IMxJ2lXJHbvTMHcW1AO71lXQHqDC+3bcpGp7yCsb8= -helm.sh/helm/v3 v3.6.0/go.mod h1:mIIus8EOqj+obtycw3sidsR4ORr2aFDmXMSI3k+oeVY= helm.sh/helm/v3 v3.6.1 h1:TQ6q4pAatXr7qh2fbLcb0oNd0I3J7kv26oo5cExKTtc= helm.sh/helm/v3 v3.6.1/go.mod h1:mIIus8EOqj+obtycw3sidsR4ORr2aFDmXMSI3k+oeVY= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 
@@ -2397,55 +2370,29 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.2/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= -k8s.io/api v0.21.4 h1:WtDkzTAuI31WZKDPeIYpEUA+WeUfXAmA7gwj6nzFfbc= -k8s.io/api v0.21.4/go.mod h1:fTVGP+M4D8+00FN2cMnJqk/eb/GH53bvmNs2SVTmpFk= k8s.io/api v0.21.5 h1:9zp3SslPRB+rqxhGKqqTo6VsN3HX0Ype1nWV6UQQ+Sk= k8s.io/api v0.21.5/go.mod h1:Un8C5Hemo2r3MfPOjZvwQQ9KkBbiTBUCGrjlivo9uJ0= -k8s.io/apiextensions-apiserver v0.21.4 h1:HkajN/vmT/9HnFmUxvpXfSGkTCvH/ax4e3+j6mqWUDU= -k8s.io/apiextensions-apiserver v0.21.4/go.mod h1:OoC8LhI9LnV+wKjZkXIBbLUwtnOGJiTRE33qctH5CIk= k8s.io/apiextensions-apiserver v0.21.5 h1:sCUpiB47ba59J57ZsqOvoxD3voc2nnR+sylAzHIwI8w= k8s.io/apiextensions-apiserver v0.21.5/go.mod h1:iiakfVazpXLW8OkF2sH/p9XGgfE7XFSQuZFJ10QlXB4= -k8s.io/apimachinery v0.21.4 h1:KDq0lWZVslHkuE5I7iGAQHwpK0aDTlar1E7IWEc4CNw= -k8s.io/apimachinery v0.21.4/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= k8s.io/apimachinery v0.21.5 h1:56bnsHcUNboSCbD779GGi4Lh5kHTDFUoDrnHbhLTiaw= k8s.io/apimachinery v0.21.5/go.mod h1:3PfBV+4PPXNs0aueD+7fHcGyhdkFFYqXeshQtsKCi+4= -k8s.io/apiserver v0.21.4 h1:egJgdhW0ueq5iJSY0c5YedPvRM2Ft/D3dcXOgwvs9jY= -k8s.io/apiserver v0.21.4/go.mod h1:SErUuFBBPZUcD2nsUU8hItxoYheqyYr2o/pCINEPW8g= k8s.io/apiserver v0.21.5 h1:iEPvJ2uwmyb7C4eScOj1fgPKCyCUGgMQU5+UREE87vE= k8s.io/apiserver v0.21.5/go.mod h1:0bWmrAx3dxUUFSEw71U91Si5obhIvBcAmf8oVZUO58E= -k8s.io/cli-runtime v0.21.4 h1:kvOzx6dKg+9wRuHTzSqo8tfTV6ixZCkmi+ag54s7mn8= -k8s.io/cli-runtime v0.21.4/go.mod h1:eRbLHYkdVWzvG87yrkgGd8CqX6/+fAG9DTdAqTXmlRY= k8s.io/cli-runtime v0.21.5 h1:ZpPmrRsHvzdMzXrcr1/ZSBHLKrhS1aHyMr2hGJNlNpI= k8s.io/cli-runtime v0.21.5/go.mod h1:TKlcXsRVImtcPDGEe72pyZtD9UgBJNupIf3hmsIeekE= -k8s.io/client-go v0.21.4 h1:tcwj167If+v+pIGrCjaPG7hFo6SqFPFCCgMJy+Vm8Jc= -k8s.io/client-go v0.21.4/go.mod h1:t0/eMKyUAq/DoQ7vW8NVVA00/nomlwC+eInsS8PxSew= k8s.io/client-go v0.21.5 h1:zkVidiWVgciPKYqWpMFMjCUF+4rRXcfkKoyQS1Ue21k= k8s.io/client-go v0.21.5/go.mod h1:EUornVlr3rBrPKXUoMPNggJdEQmvFNMpYO3Kb6432kw= -k8s.io/cloud-provider v0.21.4 h1:BPGDdyz49/ohnK3QMDWBtm39QnDm+bXIP5L7mj8AHUQ= -k8s.io/cloud-provider v0.21.4/go.mod h1:9ogsWpFKWcYC0sGPu0YZ3FMLZIlaGBSFDCNXxhlCF1o= k8s.io/cloud-provider v0.21.5 h1:wLWaGA3VrHNqP8J3eimmxDdmCfLnNl0JcpRRYhKsrrU= k8s.io/cloud-provider v0.21.5/go.mod h1:8HT2WVbR6Xr6cc/B1+wnra/kgffFtUmPjsmUu9VMyv4= -k8s.io/cluster-bootstrap v0.21.4/go.mod h1:GtXGuiEtdV4XQJcscR6qQCm/vtQWkhUi3qnl9KL9jzw= k8s.io/cluster-bootstrap v0.21.5/go.mod h1:X6MX+aOJx6NzNlEe0iUIIcFKG06qC/fqHAyzAfAgaYo= -k8s.io/code-generator v0.21.4 h1:vO8jVuEGV4UF+/2s/88Qg05MokE/1QUFi/Q2YDgz++A= -k8s.io/code-generator v0.21.4/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo= k8s.io/code-generator v0.21.5 h1:7X6dJG4hzKFHChYpP02iF0XrXhenqQHc76QoKYzDZfI= k8s.io/code-generator v0.21.5/go.mod h1:0K1k6o2ef8JD/j8LF3ZuqWLGFMHvO5psNzLLmxf7ZVE= -k8s.io/component-base v0.21.4 h1:Bc0AttSyhJFVXEIHz+VX+D11j/5z7SPPhl6whiXaRzs= -k8s.io/component-base v0.21.4/go.mod h1:ZKG0eHVX+tUDcaoIGpU3Vtk4TIjMddN9uhEWDmW6Nyg= k8s.io/component-base v0.21.5 h1:icFqcFDrO9S+FQpGohzVm6qce9vlo131K0r3NhElxiQ= k8s.io/component-base v0.21.5/go.mod h1:UyRaqQfPkBL/haEFaMWgVQvtom5TqAT+jqlFGlh6LuU= -k8s.io/component-helpers v0.21.4 h1:Q6L3sQ+L5uaaUcsJkhlzU5UchcIYBZ56Y2Bq5k4qOtk= -k8s.io/component-helpers v0.21.4/go.mod 
h1:/5TBNWmxaAymZweO1JWv3Pt5rcYJV1LbWWY0x1rDdVU= -k8s.io/component-helpers v0.21.5 h1:NzRIDAmDk0tJw2OSvDIlkXQ/j96MUKW0PF/htVH6S1g= -k8s.io/component-helpers v0.21.5/go.mod h1:sjHa2QESu4iHcL20eSKyIvCYEKdxQyS3LthUe10tt0k= k8s.io/component-helpers v0.24.0 h1:hZIHGfdd55thhqd9oxjDTw68OAPauDMJ+8hC69aNw1I= k8s.io/component-helpers v0.24.0/go.mod h1:Q2SlLm4h6g6lPTC9GMMfzdywfLSvJT2f1hOnnjaWD8c= -k8s.io/controller-manager v0.21.4/go.mod h1:a/iL7W19zkyirHDaupk9cyC11nejVznGwZI6I8tbyQY= k8s.io/controller-manager v0.21.5/go.mod h1:65guJmOoIEklTPwwYOmiHOsbEoAnQ7xKSZ9v+RdJC6g= -k8s.io/cri-api v0.21.4/go.mod h1:ukzeKnOkrG9/+ghKZA57WeZbQfRtqlGLF5GcF3RtHZ8= k8s.io/cri-api v0.21.5/go.mod h1:hYY+ZI/gXC3XMHIvuzRzDtb5BCEyoAOf44Z4a8GxoTk= -k8s.io/csi-translation-lib v0.21.4/go.mod h1:WtxJW4/3XGhllbRCO4SRkL/MyLhjaRsL6Ds+q0pDHTg= k8s.io/csi-translation-lib v0.21.5/go.mod h1:3ypbZqeM13aqwC1CpovssPkMhLgITWumH3n9PkdhDEA= k8s.io/gengo v0.0.0-20190306031000-7a1b7fb0289f/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -2456,43 +2403,26 @@ k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= -k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/kube-aggregator v0.21.4/go.mod h1:SykygeaVEQfqYH5IV8ve7Ia3dEGOGpGrdfD5NBi5yYI= k8s.io/kube-aggregator v0.21.5/go.mod h1:Zs74KHeA5RYNQw88cjfMtp46VCTIgQIX56FcxDE5NFo= -k8s.io/kube-controller-manager v0.21.4/go.mod h1:/wPS1gIX++/WjsIiimESnkpMqsjiIAMOpjVwjqLo7ng= k8s.io/kube-controller-manager v0.21.5/go.mod h1:adzsSLzeO3vkaxOTdbvHIe5WJZ7naB+s6080uCToGs0= k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20210216185858-15cd8face8d6/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= -k8s.io/kube-proxy v0.21.4/go.mod h1:eUxSO/0Z/0JjKYz/aCZdwGea7lazumkTFrqS+OWcVNI= k8s.io/kube-proxy v0.21.5/go.mod h1:brL44h883BThxzRIcIGUiOCJpTXq5Bbq/InSMYAsdB4= -k8s.io/kube-scheduler v0.21.4 h1:oUVUCM+v6rum1i5vn5C3ZrqPNkp7exWiy7/Tfzbs9ZQ= -k8s.io/kube-scheduler v0.21.4/go.mod h1:zFiUfgeM/dJajfHYG8Bx5fSrNAcLxMHFgN7ARdSJXqQ= k8s.io/kube-scheduler v0.21.5 h1:yjm5Z3pIRwORBcR7HovteRhhC58+I/gCc07wO/HMYUI= k8s.io/kube-scheduler v0.21.5/go.mod h1:7hWWLzvl0yEr+gm2Kfvt1wikhXwQb2BNylvOwzSlSMM= -k8s.io/kubectl v0.21.4 h1:ODXpSKpi5C6XnJmGg96E/36KAry513v4Jr9Efg3ePJI= -k8s.io/kubectl v0.21.4/go.mod h1:rRYB5HeScoGQKxZDQmus17pTSVIuqfm0D31ApET/qSM= k8s.io/kubectl v0.21.5 h1:Ov5ivI1SanAoVPI/n6/Sik+MQTaeGp7U2S02loXBB/s= k8s.io/kubectl v0.21.5/go.mod h1:1dDgqGZdQWH6IOLozcxQ3Tyvc5CnEL1Int6St4XEV8w= -k8s.io/kubelet v0.21.4/go.mod h1:kgXUz8upYNIngMSEZP1rpg2kp4gfUrsB7ir5u9Cm4HE= k8s.io/kubelet v0.21.5/go.mod h1:yVKsH4usaXy40Z3cZ8jknE70obOF/4aFNB7bittEEZ0= -k8s.io/kubernetes v1.21.4 h1:uKnn+MDBG4Bsed/iD3L6gMkq/szAnMqeHuSjkc3WOzQ= 
-k8s.io/kubernetes v1.21.4/go.mod h1:yNRsD2sfx76jpLKTgr0lJdVnILFWRo7b+HCo94tD48c= k8s.io/kubernetes v1.21.5 h1:PpXs+a5FdF5Nwy+9vPjs5svULcTH923QCOjzdLqZmyw= k8s.io/kubernetes v1.21.5/go.mod h1:o8QsgtH5UB3z9BYhcUZt9S6zjcJ4vdFsj2ACinL44Ss= -k8s.io/legacy-cloud-providers v0.21.4/go.mod h1:WzvDvkWfD7lKQSaSqqaYsoY3VQeAjhXYN2telpMx8co= k8s.io/legacy-cloud-providers v0.21.5/go.mod h1:VGdzalKK13Q8eJuhbrmPbuwyjc9vVaQ8T0asHpSJNBg= -k8s.io/metrics v0.21.4/go.mod h1:uhWoVuVumUMSeCa1B1p2tm4Y4XuZIg0n24QEtB54wuA= k8s.io/metrics v0.21.5/go.mod h1:Ew+6obDfJiQVsi6J2NkoI5jNMio/CCPC5v3pLXH8vos= -k8s.io/mount-utils v0.21.4 h1:T24Y4FJ9IRkXgA+UkQHr+F+f/nm7sqdkdmdSxTtF+lw= -k8s.io/mount-utils v0.21.4/go.mod h1:dwXbIPxKtTjrBEaX1aK/CMEf1KZ8GzMHpe3NEBfdFXI= k8s.io/mount-utils v0.21.5 h1:2aapn4dg0L/naSvr9vze7vIjW6nelq3hNHxb2nLselc= k8s.io/mount-utils v0.21.5/go.mod h1:dwXbIPxKtTjrBEaX1aK/CMEf1KZ8GzMHpe3NEBfdFXI= -k8s.io/sample-apiserver v0.21.4/go.mod h1:rpVLxky91DoN2OehmyZf/IE+sgop/BBoZl78VJrrs0I= k8s.io/sample-apiserver v0.21.5/go.mod h1:XqwON+6Rv40cwSe+Sr6ihQEcMI1MCvin8sDFAPFVQHc= k8s.io/sample-controller v0.20.4/go.mod h1:PAxO4dMU0MA62CB6ZyHM2rng/7oMOBLyF4qrDVA0Tcc= k8s.io/system-validators v1.4.0/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q= @@ -2505,9 +2435,7 @@ k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl k8s.io/utils v0.0.0-20200124190032-861946025e34/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210111153108-fddb29f9d009/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210527160623-6fdb442a123b h1:MSqsVQ3pZvPGTqCjptfimO2WjG7A9un2zcpiHkA6M/s= k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471 h1:DnzUXII7sVg1FJ/4JX6YDRJfLNAC7idRatPwe07suiI= k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -2529,8 +2457,6 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyz sigs.k8s.io/cluster-api v0.2.11 h1:sUngHVvh/DyHhERR1fo7eH2N/xS5qfnK7pCtwrErs68= sigs.k8s.io/cluster-api v0.2.11/go.mod h1:BCw+Pqy1sc8mQ/3d2NZM/f5BApKFCMPsnGvKolvDcA0= sigs.k8s.io/container-object-storage-interface-spec v0.0.0-20220211001052-50e143052de8/go.mod h1:kafkL5l/lTUrZXhVi/9p1GzpEE/ts29BkWkL3Ao33WU= -sigs.k8s.io/controller-runtime v0.9.0 h1:ZIZ/dtpboPSbZYY7uUz2OzrkaBTOThx2yekLtpGB+zY= -sigs.k8s.io/controller-runtime v0.9.0/go.mod h1:TgkfvrhhEw3PlI0BRL/5xM+89y3/yc0ZDfdbTl84si8= sigs.k8s.io/controller-runtime v0.9.6 h1:EevVMlgUj4fC1NVM4+DB3iPkWkmGRNarA66neqv9Qew= sigs.k8s.io/controller-runtime v0.9.6/go.mod h1:q6PpkM5vqQubEKUKOM6qr06oXGzOBcCby1DA9FbyZeA= sigs.k8s.io/gcp-compute-persistent-disk-csi-driver v0.7.0 h1:mvSbjzrnOd+3AB/7jvz7UNdZs5fhYorhm2H0A2HcIVg= diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackup.go b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackup.go index a89597e40f..c21902b623 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackup.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackup.go @@ -293,6 +293,12 @@ func jobFor( logrus.Errorf("%v", errMsg) return 
nil, errMsg } + tolerations, err := utils.GetTolerationsFromDeployment(jobOption.KopiaImageExecutorSource, + jobOption.KopiaImageExecutorSourceNs) + if err != nil { + logrus.Errorf("failed to get the toleration details: %v", err) + return nil, fmt.Errorf("failed to get the toleration details for job [%s/%s]", jobOption.Namespace, jobName) + } job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: jobName, @@ -337,6 +343,7 @@ func jobFor( }, }, }, + Tolerations: tolerations, Volumes: []corev1.Volume{ { Name: "vol", diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackuplive.go b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackuplive.go index 119699e0a2..876f37c476 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackuplive.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackuplive.go @@ -79,6 +79,12 @@ func jobForLiveBackup( logrus.Errorf("%v", errMsg) return nil, errMsg } + tolerations, err := utils.GetTolerationsFromDeployment(jobOption.KopiaImageExecutorSource, + jobOption.KopiaImageExecutorSourceNs) + if err != nil { + logrus.Errorf("failed to get the toleration details: %v", err) + return nil, fmt.Errorf("failed to get the toleration details for job [%s/%s]", jobOption.Namespace, jobName) + } job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: jobName, @@ -127,6 +133,7 @@ func jobForLiveBackup( }, }, }, + Tolerations: tolerations, Volumes: []corev1.Volume{ { Name: "vol", diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiadelete/kopiadelete.go b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiadelete/kopiadelete.go index 6908bacac7..252feb92c6 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiadelete/kopiadelete.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiadelete/kopiadelete.go @@ -203,7 +203,12 @@ func jobFor( logrus.Errorf("%v", errMsg) return nil, errMsg } - + tolerations, err := utils.GetTolerationsFromDeployment(jobOption.KopiaImageExecutorSource, + jobOption.KopiaImageExecutorSourceNs) + if err != nil { + logrus.Errorf("failed to get the toleration details: %v", err) + return nil, fmt.Errorf("failed to get the toleration details for job [%s/%s]", jobOption.Namespace, jobName) + } job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: jobName, @@ -245,6 +250,7 @@ func jobFor( }, }, }, + Tolerations: tolerations, Volumes: []corev1.Volume{ { Name: "cred-secret", @@ -335,6 +341,16 @@ func jobFor( }, }, } + } else { + nodeAffinity, err := utils.GetNodeAffinityFromDeployment(jobOption.KopiaImageExecutorSource, + jobOption.KopiaImageExecutorSourceNs) + if err != nil { + logrus.Errorf("failed to get the node affinity details: %v", err) + return nil, fmt.Errorf("failed to get the node affinity details for job [%s/%s]", jobOption.Namespace, jobName) + } + job.Spec.Template.Spec.Affinity = &corev1.Affinity{ + NodeAffinity: nodeAffinity, + } } job.Spec.Template.Spec.Containers[0].Env = env diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiamaintenance/kopiamaintenance.go b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiamaintenance/kopiamaintenance.go index 642898867d..47e4bd7f28 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiamaintenance/kopiamaintenance.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiamaintenance/kopiamaintenance.go @@ -215,6 +215,13 @@ func jobFor( return nil, errMsg } + tolerations, err := utils.GetTolerationsFromDeployment(jobOption.KopiaImageExecutorSource, + jobOption.KopiaImageExecutorSourceNs) + if 
err != nil { + logrus.Errorf("failed to get the toleration details: %v", err) + return nil, fmt.Errorf("failed to get the toleration details for job [%s/%s]", jobOption.Namespace, jobName) + } + jobObjectMeta := metav1.ObjectMeta{ Name: jobName, Namespace: jobOption.JobNamespace, @@ -250,6 +257,7 @@ func jobFor( }, }, }, + Tolerations: tolerations, Volumes: []corev1.Volume{ { Name: "cred-secret", @@ -333,6 +341,16 @@ func jobFor( }, }, } + } else { + nodeAffinity, err := utils.GetNodeAffinityFromDeployment(jobOption.KopiaImageExecutorSource, + jobOption.KopiaImageExecutorSourceNs) + if err != nil { + logrus.Errorf("failed to get the node affinity details: %v", err) + return nil, fmt.Errorf("failed to get the node affinity details for job [%s/%s]", jobOption.Namespace, jobName) + } + jobSpec.Affinity = &corev1.Affinity{ + NodeAffinity: nodeAffinity, + } } if requiresV1 { diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiarestore/kopiarestore.go b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiarestore/kopiarestore.go index 0864e89035..23f1fd2fe1 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiarestore/kopiarestore.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiarestore/kopiarestore.go @@ -210,6 +210,12 @@ func jobFor( logrus.Errorf("%v", errMsg) return nil, errMsg } + tolerations, err := utils.GetTolerationsFromDeployment(jobOption.KopiaImageExecutorSource, + jobOption.KopiaImageExecutorSourceNs) + if err != nil { + logrus.Errorf("failed to get the toleration details: %v", err) + return nil, fmt.Errorf("failed to get the toleration details for job [%s/%s]", jobOption.Namespace, jobName) + } job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: jobName, @@ -254,6 +260,7 @@ func jobFor( }, }, }, + Tolerations: tolerations, Volumes: []corev1.Volume{ { Name: "vol", diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/nfsbackup/nfsbackup.go b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsbackup/nfsbackup.go index 6b954ed6e1..d687e02905 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/nfsbackup/nfsbackup.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsbackup/nfsbackup.go @@ -198,6 +198,12 @@ func jobForBackupResource( logrus.Errorf("failed to get the executor image details") return nil, fmt.Errorf("failed to get the executor image details for job %s", jobOption.JobName) } + tolerations, err := utils.GetTolerationsFromDeployment(jobOption.NfsImageExecutorSource, + jobOption.NfsImageExecutorSourceNs) + if err != nil { + logrus.Errorf("failed to get the toleration details: %v", err) + return nil, fmt.Errorf("failed to get the toleration details for job [%s/%s]", jobOption.Namespace, jobOption.RestoreExportName) + } job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: jobOption.RestoreExportName, @@ -238,6 +244,7 @@ func jobForBackupResource( }, }, }, + Tolerations: tolerations, Volumes: []corev1.Volume{ { Name: "cred-secret", diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/nfsdelete/nfsdelete.go b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsdelete/nfsdelete.go index 2a5d772e30..a4964a5a7e 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/nfsdelete/nfsdelete.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsdelete/nfsdelete.go @@ -165,15 +165,20 @@ func jobForDeleteResource( }, " ") nfsExecutorImage, _, err := utils.GetExecutorImageAndSecret(drivers.NfsExecutorImage, - jobOption.KopiaImageExecutorSource, - jobOption.KopiaImageExecutorSourceNs, + jobOption.NfsImageExecutorSource, + 
jobOption.NfsImageExecutorSourceNs, jobOption.JobName, jobOption) if err != nil { logrus.Errorf("failed to get the executor image details") return nil, fmt.Errorf("failed to get the executor image details for job %s", jobOption.JobName) } - + tolerations, err := utils.GetTolerationsFromDeployment(jobOption.NfsImageExecutorSource, + jobOption.NfsImageExecutorSourceNs) + if err != nil { + logrus.Errorf("failed to get the toleration details: %v", err) + return nil, fmt.Errorf("failed to get the toleration details for job [%s/%s]", jobOption.Namespace, jobOption.JobName) + } job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: jobOption.JobName, @@ -214,6 +219,7 @@ func jobForDeleteResource( }, }, }, + Tolerations: tolerations, Volumes: []corev1.Volume{ { Name: "cred-secret", @@ -249,5 +255,15 @@ func jobForDeleteResource( job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, volume) } + nodeAffinity, err := utils.GetNodeAffinityFromDeployment(jobOption.NfsImageExecutorSource, + jobOption.NfsImageExecutorSourceNs) + if err != nil { + logrus.Errorf("failed to get the node affinity details: %v", err) + return nil, fmt.Errorf("failed to get the node affinity details for job [%s/%s]", jobOption.Namespace, jobOption.JobName) + } + job.Spec.Template.Spec.Affinity = &corev1.Affinity{ + NodeAffinity: nodeAffinity, + } + return job, nil } diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/nfsrestore/nfsrestore.go b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsrestore/nfsrestore.go index be5c95a6e9..0281b6e8b7 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/nfsrestore/nfsrestore.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsrestore/nfsrestore.go @@ -217,6 +217,12 @@ func jobForRestoreResource( logrus.Errorf("failed to get the executor image details") return nil, fmt.Errorf("failed to get the executor image details for job %s", jobOption.JobName) } + tolerations, err := utils.GetTolerationsFromDeployment(jobOption.NfsImageExecutorSource, + jobOption.NfsImageExecutorSourceNs) + if err != nil { + logrus.Errorf("failed to get the toleration details: %v", err) + return nil, fmt.Errorf("failed to get the toleration details for job [%s/%s]", jobOption.Namespace, jobOption.RestoreExportName) + } job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: jobOption.RestoreExportName, @@ -257,6 +263,7 @@ func jobForRestoreResource( }, }, }, + Tolerations: tolerations, Volumes: []corev1.Volume{ { Name: "cred-secret", diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go index b2ff03f585..43b1704c48 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go @@ -631,6 +631,10 @@ func CreateImageRegistrySecret(sourceName, destName, sourceNamespace, destNamesp // and create one in the current job's namespace secret, err := core.Instance().GetSecret(sourceName, sourceNamespace) if err != nil { + // Safely exit if image registry secret is not found. + if apierrors.IsNotFound(err) { + return nil + } logrus.Errorf("failed in getting secret [%v/%v]: %v", sourceNamespace, sourceName, err) return err } @@ -734,7 +738,7 @@ func CreateNfsPvc(pvcName string, pvName string, namespace string) error { Spec: corev1.PersistentVolumeClaimSpec{ // Setting it to empty stringm so that default storage class will not selected. 
StorageClassName: &empttyStorageClass, - AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceName(corev1.ResourceStorage): resource.MustParse(nfsVolumeSize), @@ -845,3 +849,21 @@ func GetPvcNameForJob(jobName string) string { func GetPvNameForJob(jobName string) string { return "pv-" + jobName } + +// GetTolerationsFromDeployment - extract tolerations from deployment spec +func GetTolerationsFromDeployment(name, namespace string) ([]corev1.Toleration, error) { + deploy, err := apps.Instance().GetDeployment(name, namespace) + if err != nil { + return nil, err + } + return deploy.Spec.Template.Spec.Tolerations, nil +} + +// GetNodeAffinityFromDeployment - extract NodeAffinity from deployment spec +func GetNodeAffinityFromDeployment(name, namespace string) (*corev1.NodeAffinity, error) { + deploy, err := apps.Instance().GetDeployment(name, namespace) + if err != nil { + return nil, err + } + return deploy.Spec.Template.Spec.Affinity.NodeAffinity, nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 8fda0e9769..6287323f51 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -691,7 +691,7 @@ github.com/pierrec/lz4/internal/xxh32 github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 github.com/pmezard/go-difflib/difflib -# github.com/portworx/kdmp v0.4.1-0.20221109152346-d4c8431982fc +# github.com/portworx/kdmp v0.4.1-0.20221110051828-bdbb3afc4dd0 ## explicit github.com/portworx/kdmp/pkg/apis/kdmp github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1 @@ -1121,8 +1121,6 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/grpc/examples v0.0.0-20220715232852-f601dfac73c9 -## explicit # google.golang.org/protobuf v1.28.0 google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo google.golang.org/protobuf/compiler/protogen @@ -1176,7 +1174,6 @@ gopkg.in/square/go-jose.v2/jwt ## explicit gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 -## explicit gopkg.in/yaml.v3 # helm.sh/helm/v3 v3.0.0-00010101000000-000000000000 => helm.sh/helm/v3 v3.6.1 helm.sh/helm/v3/internal/experimental/registry From dd80df846e8aa132c2a568da63d1d0e9c4445a71 Mon Sep 17 00:00:00 2001 From: sivakumar subraani Date: Fri, 11 Nov 2022 07:08:30 +0000 Subject: [PATCH 83/97] pb-3258: Added TriggeredFrom and TriggeredFromNs fields for resourceExport as well --- pkg/applicationmanager/controllers/applicationrestore.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pkg/applicationmanager/controllers/applicationrestore.go b/pkg/applicationmanager/controllers/applicationrestore.go index 9a84cdb885..f6c7d9e29a 100644 --- a/pkg/applicationmanager/controllers/applicationrestore.go +++ b/pkg/applicationmanager/controllers/applicationrestore.go @@ -1499,6 +1499,13 @@ func (a *ApplicationRestoreController) restoreResources( resourceExport.Name = crName resourceExport.Namespace = a.restoreAdminNamespace resourceExport.Spec.Type = kdmpapi.ResourceExportBackup + resourceExport.Spec.TriggeredFrom = kdmputils.TriggeredFromStork + storkPodNs, err := k8sutils.GetStorkPodNamespace() + if err != nil { + logrus.Errorf("error in getting stork pod namespace: %v", err) + return err + } + resourceExport.Spec.TriggeredFromNs = storkPodNs source := &kdmpapi.ResourceExportObjectReference{ APIVersion: restore.APIVersion, Kind: restore.Kind, From
5dab55dc45613b31aa2b20ae021862836e7603ec Mon Sep 17 00:00:00 2001 From: sivakumar subraani Date: Thu, 3 Nov 2022 04:30:23 +0000 Subject: [PATCH 84/97] pb3206: Not returning error if the storageclass is missing on destination cluster --- drivers/volume/portworx/portworx.go | 106 ++++++++++++++-------------- 1 file changed, 54 insertions(+), 52 deletions(-) diff --git a/drivers/volume/portworx/portworx.go b/drivers/volume/portworx/portworx.go index ccd71926ff..c0217f75d7 100644 --- a/drivers/volume/portworx/portworx.go +++ b/drivers/volume/portworx/portworx.go @@ -2610,67 +2610,69 @@ func (p *portworx) UpdateMigratedPersistentVolumeSpec( if len(pv.Spec.StorageClassName) != 0 { sc, err := storage.Instance().GetStorageClass(pv.Spec.StorageClassName) if err != nil { - return nil, fmt.Errorf("failed in getting the storage class [%v]: %v", pv.Spec.StorageClassName, err) + logrus.Warnf("failed in getting the storage class [%v]: %v", pv.Spec.StorageClassName, err) } - if isCsiProvisioner(sc.Provisioner) { - // add csi section in the pv spec - if pv.Spec.CSI == nil { - pv.Spec.CSI = &v1.CSIPersistentVolumeSource{} - } - // get the destinationNamespace - var dstNamespace string - var exists bool - if dstNamespace, exists = namespaceMapping[vInfo.SourceNamespace]; !exists { - dstNamespace = vInfo.SourceNamespace - } - // Update the controller expand secret - if val, ok := sc.Parameters[controllerExpandSecretName]; ok { - if pv.Spec.CSI.ControllerExpandSecretRef == nil { - pv.Spec.CSI.ControllerExpandSecretRef = &v1.SecretReference{} + if sc != nil { + if isCsiProvisioner(sc.Provisioner) { + // add csi section in the pv spec + if pv.Spec.CSI == nil { + pv.Spec.CSI = &v1.CSIPersistentVolumeSource{} } - if val == templatizedName { - pv.Spec.CSI.ControllerExpandSecretRef.Name = vInfo.PersistentVolumeClaim - } else { - pv.Spec.CSI.ControllerExpandSecretRef.Name = val + // get the destinationNamespace + var dstNamespace string + var exists bool + if dstNamespace, exists = namespaceMapping[vInfo.SourceNamespace]; !exists { + dstNamespace = vInfo.SourceNamespace } - } - if val, ok := sc.Parameters[controllerExpandSecretNamespace]; ok { - if pv.Spec.CSI.ControllerExpandSecretRef == nil { - pv.Spec.CSI.ControllerExpandSecretRef = &v1.SecretReference{} + // Update the controller expand secret + if val, ok := sc.Parameters[controllerExpandSecretName]; ok { + if pv.Spec.CSI.ControllerExpandSecretRef == nil { + pv.Spec.CSI.ControllerExpandSecretRef = &v1.SecretReference{} + } + if val == templatizedName { + pv.Spec.CSI.ControllerExpandSecretRef.Name = vInfo.PersistentVolumeClaim + } else { + pv.Spec.CSI.ControllerExpandSecretRef.Name = val + } } - if val == templatizedNamespace { - pv.Spec.CSI.ControllerExpandSecretRef.Namespace = dstNamespace - } else { - pv.Spec.CSI.ControllerExpandSecretRef.Namespace = val + if val, ok := sc.Parameters[controllerExpandSecretNamespace]; ok { + if pv.Spec.CSI.ControllerExpandSecretRef == nil { + pv.Spec.CSI.ControllerExpandSecretRef = &v1.SecretReference{} + } + if val == templatizedNamespace { + pv.Spec.CSI.ControllerExpandSecretRef.Namespace = dstNamespace + } else { + pv.Spec.CSI.ControllerExpandSecretRef.Namespace = val + } } - } - // Update the node publish secret - if val, ok := sc.Parameters[nodePublishSecretName]; ok { - if pv.Spec.CSI.NodePublishSecretRef == nil { - pv.Spec.CSI.NodePublishSecretRef = &v1.SecretReference{} - } - if val == templatizedName { - pv.Spec.CSI.NodePublishSecretRef.Name = vInfo.PersistentVolumeClaim - } else { - 
pv.Spec.CSI.NodePublishSecretRef.Name = val - } - } - if val, ok := sc.Parameters[nodePublishSecretNamespace]; ok { - if pv.Spec.CSI.NodePublishSecretRef == nil { - pv.Spec.CSI.NodePublishSecretRef = &v1.SecretReference{} + // Update the node publish secret + if val, ok := sc.Parameters[nodePublishSecretName]; ok { + if pv.Spec.CSI.NodePublishSecretRef == nil { + pv.Spec.CSI.NodePublishSecretRef = &v1.SecretReference{} + } + if val == templatizedName { + pv.Spec.CSI.NodePublishSecretRef.Name = vInfo.PersistentVolumeClaim + } else { + pv.Spec.CSI.NodePublishSecretRef.Name = val + } } - if val == templatizedNamespace { - pv.Spec.CSI.NodePublishSecretRef.Namespace = dstNamespace - } else { - pv.Spec.CSI.NodePublishSecretRef.Namespace = val + if val, ok := sc.Parameters[nodePublishSecretNamespace]; ok { + if pv.Spec.CSI.NodePublishSecretRef == nil { + pv.Spec.CSI.NodePublishSecretRef = &v1.SecretReference{} + } + if val == templatizedNamespace { + pv.Spec.CSI.NodePublishSecretRef.Namespace = dstNamespace + } else { + pv.Spec.CSI.NodePublishSecretRef.Namespace = val + } } - } - // Update driver (provisioner) name - pv.Spec.CSI.Driver = sc.Provisioner - // In the case of csi, will set pv.Spec.portworxVolume to nil as we will have csi section now. - pv.Spec.PortworxVolume = nil + // Update driver (provisioner) name + pv.Spec.CSI.Driver = sc.Provisioner + // In the case of csi, will set pv.Spec.portworxVolume to nil as we will have csi section now. + pv.Spec.PortworxVolume = nil + } } } From 9eb8da89a6f38eb4a874874e2d83664bba296de5 Mon Sep 17 00:00:00 2001 From: Prashanth Kumar Date: Fri, 11 Nov 2022 16:38:27 +0000 Subject: [PATCH 85/97] Prefixed backup resource export CR names with nfs for NFS backup jobs --- pkg/applicationmanager/controllers/applicationbackup.go | 6 +++--- pkg/utils/utils.go | 2 ++ 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/pkg/applicationmanager/controllers/applicationbackup.go b/pkg/applicationmanager/controllers/applicationbackup.go index 5d2bd1938b..3a876f95c2 100644 --- a/pkg/applicationmanager/controllers/applicationbackup.go +++ b/pkg/applicationmanager/controllers/applicationbackup.go @@ -1339,7 +1339,7 @@ func (a *ApplicationBackupController) backupResources( if nfs { // Check whether ResourceExport is present or not - crName := getResourceExportCRName(utils.PrefixBackup, string(backup.UID), backup.Namespace) + crName := getResourceExportCRName(utils.PrefixNFSBackup, string(backup.UID), backup.Namespace) resourceExport, err := kdmpShedOps.Instance().GetResourceExport(crName, a.backupAdminNamespace) if err != nil { if k8s_errors.IsNotFound(err) { @@ -1359,7 +1359,7 @@ func (a *ApplicationBackupController) backupResources( resourceExport.Labels = labels resourceExport.Annotations = make(map[string]string) resourceExport.Annotations[utils.SkipResourceAnnotation] = "true" - resourceExport.Name = getResourceExportCRName(utils.PrefixBackup, string(backup.UID), backup.Namespace) + resourceExport.Name = getResourceExportCRName(utils.PrefixNFSBackup, string(backup.UID), backup.Namespace) resourceExport.Namespace = a.backupAdminNamespace resourceExport.Spec.Type = kdmpapi.ResourceExportBackup source := &kdmpapi.ResourceExportObjectReference{ @@ -1646,7 +1646,7 @@ func (a *ApplicationBackupController) cleanupResources( } // Directly calling DeleteResourceExport with out checking backuplocation type.
// For other backuplocation type, expecting Notfound - crName := getResourceExportCRName(utils.PrefixBackup, string(backup.UID), backup.Namespace) + crName := getResourceExportCRName(utils.PrefixNFSBackup, string(backup.UID), backup.Namespace) err := kdmpShedOps.Instance().DeleteResourceExport(crName, a.backupAdminNamespace) if err != nil && !k8s_errors.IsNotFound(err) { errMsg := fmt.Sprintf("failed to delete data export CR [%v]: %v", crName, err) diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index a2be450aee..396cf40ce1 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -24,6 +24,8 @@ const ( // PrefixBackup - prefix string that will be used for the kdmp backup job PrefixBackup = "backup" + // PrefixNFSBackup prefix string that will be used for the nfs backup job + PrefixNFSBackup = "nfs-backup" // PrefixRestore prefix string that will be used for the kdmp restore job PrefixRestore = "nfs-restore-resource" // PrefixNFSRestorePVC prefix string that will be used for pvc creation during nfs vol restore From 8593f9a7d53aa261fb3cb8569b0866d97e069eca Mon Sep 17 00:00:00 2001 From: sivakumar subraani Date: Sat, 12 Nov 2022 18:14:59 +0000 Subject: [PATCH 86/97] vendor latest kdmp from 1.2.3 branch --- go.mod | 2 +- go.sum | 2 + .../pkg/apis/kdmp/v1alpha1/resourcebackup.go | 2 + .../pkg/controllers/dataexport/reconcile.go | 11 +----- .../pkg/drivers/kopiadelete/kopiadelete.go | 4 +- .../kdmp/pkg/drivers/nfsbackup/nfsbackup.go | 12 +++--- .../kdmp/pkg/drivers/nfsrestore/nfsrestore.go | 10 ++--- .../portworx/kdmp/pkg/drivers/utils/utils.go | 37 +++---------------- vendor/modules.txt | 2 +- 9 files changed, 27 insertions(+), 55 deletions(-) diff --git a/go.mod b/go.mod index b132babb31..f5d6991319 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/openshift/api v0.0.0-20210105115604-44119421ec6b github.com/openshift/client-go v0.0.0-20210112165513-ebc401615f47 github.com/pborman/uuid v1.2.0 - github.com/portworx/kdmp v0.4.1-0.20221110051828-bdbb3afc4dd0 + github.com/portworx/kdmp v0.4.1-0.20221112180956-71d263e21fb9 github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987 github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 github.com/portworx/torpedo v0.20.4-rc1.0.20210325154352-eb81b0cdd145 diff --git a/go.sum b/go.sum index 495fb0f601..7cb7560b3c 100644 --- a/go.sum +++ b/go.sum @@ -1426,6 +1426,8 @@ github.com/portworx/kdmp v0.4.1-0.20220902105026-dc14791e1508/go.mod h1:sTO9LkPk github.com/portworx/kdmp v0.4.1-0.20220905153748-e0bb69e59f38/go.mod h1:NI2UgLITtggRvvaRA7lE4+Np4htDp+06Jf1LWksKyGA= github.com/portworx/kdmp v0.4.1-0.20221110051828-bdbb3afc4dd0 h1:rIpCh+5iQCIQgpjUWsc1ToWXeX+NbtCiZm2leyPx5s4= github.com/portworx/kdmp v0.4.1-0.20221110051828-bdbb3afc4dd0/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= +github.com/portworx/kdmp v0.4.1-0.20221112180956-71d263e21fb9 h1:I9LjqBwgka4Z3ny8sDioy3RfX4j5v5mLRVUS3H0f2D4= +github.com/portworx/kdmp v0.4.1-0.20221112180956-71d263e21fb9/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= github.com/portworx/kvdb v0.0.0-20190105022415-cccaa09abfc9/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200723230726-2734b7f40194/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200929023115-b312c7519467 h1:jkqzdbOnejgSN5HG/FLt4enNrozWT/K+nlmaRm3P1II= diff --git a/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/resourcebackup.go 
b/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/resourcebackup.go index eb24932be2..e1aa7fd764 100644 --- a/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/resourcebackup.go +++ b/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/resourcebackup.go @@ -35,6 +35,8 @@ const ( // ResourceBackupProgressStatus overall resource backup/restore progress type ResourceBackupProgressStatus struct { + // ProgressPercentage is the progress of the command in percentage + ProgressPercentage float64 // Status status of resource export Status ResourceBackupStatus `json:"status,omitempty"` // Reason status reason diff --git a/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go b/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go index e523f385e1..5257134fd3 100644 --- a/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go +++ b/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go @@ -688,7 +688,7 @@ func (c *Controller) stageSnapshotScheduled(ctx context.Context, dataExport *kdm snapName := toSnapName(dataExport.Spec.Source.Name, string(dataExport.UID)) annotations := make(map[string]string) annotations[dataExportUIDAnnotation] = string(dataExport.UID) - annotations[dataExportNameAnnotation] = trimLabel(dataExport.Name) + annotations[dataExportNameAnnotation] = utils.GetValidLabel(dataExport.Name) annotations[backupObjectUIDKey] = backupUID annotations[pvcUIDKey] = pvcUID labels := make(map[string]string) @@ -1526,7 +1526,7 @@ func (c *Controller) restoreSnapshot(ctx context.Context, snapshotDriver snapsho pvc.Annotations = make(map[string]string) pvc.Annotations[skipResourceAnnotation] = "true" pvc.Annotations[dataExportUIDAnnotation] = string(de.UID) - pvc.Annotations[dataExportNameAnnotation] = trimLabel(de.Name) + pvc.Annotations[dataExportNameAnnotation] = utils.GetValidLabel(de.Name) // If storage class annotation is set , then put that annotation too in the temp pvc // Sometimes the spec.storageclass might be empty, in that case the temp pvc may get the sc as the default sc @@ -2079,13 +2079,6 @@ func toBoundJobPVCName(pvcName string, pvcUID string) string { return fmt.Sprintf("%s-%s-%s", "bound", truncatedPVCName, uidToken[0]) } -func trimLabel(label string) string { - if len(label) > 63 { - return label[:63] - } - return label -} - func getRepoPVCName(de *kdmpapi.DataExport, pvcName string) string { if hasSnapshotStage(de) { subStrings := strings.Split(pvcName, "-") diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiadelete/kopiadelete.go b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiadelete/kopiadelete.go index 252feb92c6..da51c02938 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiadelete/kopiadelete.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiadelete/kopiadelete.go @@ -372,7 +372,7 @@ func toRepoName(pvcName, pvcNamespace string) string { func addVolumeBackupDeleteLabels(jobOpts drivers.JobOpts) map[string]string { labels := make(map[string]string) - labels[utils.BackupObjectNameKey] = jobOpts.BackupObjectName + labels[utils.BackupObjectNameKey] = utils.GetValidLabel(jobOpts.BackupObjectName) labels[utils.BackupObjectUIDKey] = jobOpts.BackupObjectUID return labels } @@ -383,7 +383,7 @@ func addJobLabels(labels map[string]string, jobOpts drivers.JobOpts) map[string] } labels[drivers.DriverNameLabel] = drivers.KopiaDelete - labels[utils.BackupObjectNameKey] = jobOpts.BackupObjectName + labels[utils.BackupObjectNameKey] = 
utils.GetValidLabel(jobOpts.BackupObjectName) labels[utils.BackupObjectUIDKey] = jobOpts.BackupObjectUID return labels } diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/nfsbackup/nfsbackup.go b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsbackup/nfsbackup.go index d687e02905..0e5453631c 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/nfsbackup/nfsbackup.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsbackup/nfsbackup.go @@ -73,7 +73,7 @@ func (d Driver) JobStatus(id string) (*drivers.JobStatus, error) { fn := "JobStatus" namespace, name, err := utils.ParseJobID(id) if err != nil { - return utils.ToNFSJobStatus(err.Error(), batchv1.JobConditionType("")), nil + return utils.ToJobStatus(0, err.Error(), batchv1.JobConditionType("")), nil } job, err := batch.Instance().GetJob(name, namespace) @@ -97,11 +97,11 @@ func (d Driver) JobStatus(id string) (*drivers.JobStatus, error) { var errMsg string if jobErr { errMsg = fmt.Sprintf("check %s/%s job for details: %s", namespace, name, drivers.ErrJobFailed) - return utils.ToNFSJobStatus(errMsg, jobStatus), nil + return utils.ToJobStatus(0, errMsg, jobStatus), nil } if nodeErr { errMsg = fmt.Sprintf("Node [%v] on which job [%v/%v] schedules is NotReady", job.Spec.Template.Spec.NodeName, namespace, name) - return utils.ToNFSJobStatus(errMsg, jobStatus), nil + return utils.ToJobStatus(0, errMsg, jobStatus), nil } res, err := kdmp.Instance().GetResourceBackup(name, namespace) @@ -109,12 +109,12 @@ func (d Driver) JobStatus(id string) (*drivers.JobStatus, error) { if apierrors.IsNotFound(err) { if utils.IsJobPending(job) { logrus.Warnf("backup job %s is in pending state", job.Name) - return utils.ToNFSJobStatus(err.Error(), jobStatus), nil + return utils.ToJobStatus(0, err.Error(), jobStatus), nil } } } - - return utils.ToNFSJobStatus(res.Status.Reason, jobStatus), nil + logrus.Tracef("res.Status: %v", res.Status) + return utils.ToJobStatus(res.Status.ProgressPercentage, res.Status.Reason, jobStatus), nil } func buildJob( diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/nfsrestore/nfsrestore.go b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsrestore/nfsrestore.go index 0281b6e8b7..12409df44b 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/nfsrestore/nfsrestore.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsrestore/nfsrestore.go @@ -73,7 +73,7 @@ func (d Driver) JobStatus(id string) (*drivers.JobStatus, error) { fn := "JobStatus" namespace, name, err := utils.ParseJobID(id) if err != nil { - return utils.ToNFSJobStatus(err.Error(), batchv1.JobConditionType("")), nil + return utils.ToJobStatus(0, err.Error(), batchv1.JobConditionType("")), nil } job, err := batch.Instance().GetJob(name, namespace) @@ -98,11 +98,11 @@ func (d Driver) JobStatus(id string) (*drivers.JobStatus, error) { var errMsg string if jobErr { errMsg = fmt.Sprintf("check %s/%s job for details: %s", namespace, name, drivers.ErrJobFailed) - return utils.ToNFSJobStatus(errMsg, jobStatus), nil + return utils.ToJobStatus(0, errMsg, jobStatus), nil } if nodeErr { errMsg = fmt.Sprintf("Node [%v] on which job [%v/%v] schedules is NotReady", job.Spec.Template.Spec.NodeName, namespace, name) - return utils.ToNFSJobStatus(errMsg, jobStatus), nil + return utils.ToJobStatus(0, errMsg, jobStatus), nil } res, err := kdmp.Instance().GetResourceBackup(name, namespace) @@ -110,12 +110,12 @@ func (d Driver) JobStatus(id string) (*drivers.JobStatus, error) { if apierrors.IsNotFound(err) { if utils.IsJobPending(job) { logrus.Warnf("restore job %s is in 
pending state", job.Name) - return utils.ToNFSJobStatus(err.Error(), jobStatus), nil + return utils.ToJobStatus(0, err.Error(), jobStatus), nil } } } logrus.Tracef("%s jobStatus:%v", fn, jobStatus) - return utils.ToNFSJobStatus(res.Status.Reason, jobStatus), nil + return utils.ToJobStatus(res.Status.ProgressPercentage, res.Status.Reason, jobStatus), nil } func buildJob( diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go index 43b1704c48..ac75174f25 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go @@ -61,6 +61,8 @@ const ( ResourceUploadSuccessMsg = "upload resource Successfully" // PvcBoundSuccessMsg - pvc bound success message PvcBoundSuccessMsg = "pvc bounded successfully" + // PvcBoundFailedMsg pvc not bounded msg + PvcBoundFailedMsg = "pvc not bounded" ) var ( @@ -211,43 +213,15 @@ func FetchJobContainerRestartCount(j *batchv1.Job) (int32, error) { // ToJobStatus returns a job status for provided parameters. func ToJobStatus(progress float64, errMsg string, jobStatus batchv1.JobConditionType) *drivers.JobStatus { - if len(errMsg) > 0 { - return &drivers.JobStatus{ - State: drivers.JobStateFailed, - Reason: errMsg, - Status: jobStatus, - } - } - if drivers.IsTransferCompleted(progress) { return &drivers.JobStatus{ State: drivers.JobStateCompleted, ProgressPercents: progress, Status: jobStatus, + Reason: errMsg, } } - return &drivers.JobStatus{ - State: drivers.JobStateInProgress, - ProgressPercents: progress, - Status: jobStatus, - } -} - -// ToNFSJobStatus returns a job status for provided parameters. -func ToNFSJobStatus(errMsg string, jobStatus batchv1.JobConditionType) *drivers.JobStatus { - // Note: This err msg has to match with the msg set in executor when the job - // is successful - // TODO: Need to have better logical way to notify job completion, this - // hard coding of msg doesn't look good - if errMsg == ResourceUploadSuccessMsg || - errMsg == PvcBoundSuccessMsg { - return &drivers.JobStatus{ - State: drivers.JobStateCompleted, - Reason: errMsg, - Status: jobStatus, - } - } if len(errMsg) > 0 { return &drivers.JobStatus{ State: drivers.JobStateFailed, @@ -257,8 +231,9 @@ func ToNFSJobStatus(errMsg string, jobStatus batchv1.JobConditionType) *drivers. 
} return &drivers.JobStatus{ - State: drivers.JobStateInProgress, - Status: jobStatus, + State: drivers.JobStateInProgress, + ProgressPercents: progress, + Status: jobStatus, } } diff --git a/vendor/modules.txt b/vendor/modules.txt index 6287323f51..90e6af564d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -691,7 +691,7 @@ github.com/pierrec/lz4/internal/xxh32 github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 github.com/pmezard/go-difflib/difflib -# github.com/portworx/kdmp v0.4.1-0.20221110051828-bdbb3afc4dd0 +# github.com/portworx/kdmp v0.4.1-0.20221112180956-71d263e21fb9 ## explicit github.com/portworx/kdmp/pkg/apis/kdmp github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1 From b8e8bb15a71f363048a8621cb0e10cedfdf8bc17 Mon Sep 17 00:00:00 2001 From: Prashanth Kumar Date: Thu, 17 Nov 2022 01:47:16 +0000 Subject: [PATCH 87/97] pb-3306: Removing setting of CR status to success before checking the status of all resources --- pkg/applicationmanager/controllers/applicationrestore.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/pkg/applicationmanager/controllers/applicationrestore.go b/pkg/applicationmanager/controllers/applicationrestore.go index f6c7d9e29a..299cc93fd2 100644 --- a/pkg/applicationmanager/controllers/applicationrestore.go +++ b/pkg/applicationmanager/controllers/applicationrestore.go @@ -1569,10 +1569,6 @@ func (a *ApplicationRestoreController) restoreResources( resource.Status, resource.Reason) } - restore.Status.Stage = storkapi.ApplicationRestoreStageFinal - restore.Status.FinishTimestamp = metav1.Now() - restore.Status.Status = storkapi.ApplicationRestoreStatusSuccessful - restore.Status.Reason = "Volumes and resources were restored up successfully" case kdmpapi.ResourceExportStatusInitial: doCleanup = false case kdmpapi.ResourceExportStatusPending: From 3b00af2fcce011440b56d0e349d71d5485fb6272 Mon Sep 17 00:00:00 2001 From: sivakumar subraani Date: Sun, 20 Nov 2022 13:43:05 +0000 Subject: [PATCH 88/97] vendor kdmp from 1.2.3 release branch.
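This vendor update also pulls in the kdmp fix that stops counting the drivers' own job pods when deciding whether a PVC is live. Below is a minimal sketch of the vendored filtering logic, assuming only the pods list and drivers.DriverNameLabel from the hunks that follow (error handling elided):

    count := len(pods)
    for _, pod := range pods {
        // skip job pods created by the kdmp drivers themselves,
        // which are tagged with DriverNameLabel
        if _, ok := pod.ObjectMeta.Labels[drivers.DriverNameLabel]; ok {
            count--
        }
    }
    // the PVC is treated as live (mounted) only when count > 0

Without this filter, the driver's own job pod could make the source PVC look mounted and push the backup down the live-backup path.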
--- go.mod | 2 +- go.sum | 2 ++ .../kdmp/pkg/controllers/dataexport/reconcile.go | 10 +++++++++- .../kdmp/pkg/drivers/kopiabackup/kopiabackup.go | 10 +++++++++- .../portworx/kdmp/pkg/drivers/utils/common.go | 8 ++++++++ vendor/modules.txt | 2 +- 6 files changed, 30 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index f5d6991319..7a8be56cb5 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/openshift/api v0.0.0-20210105115604-44119421ec6b github.com/openshift/client-go v0.0.0-20210112165513-ebc401615f47 github.com/pborman/uuid v1.2.0 - github.com/portworx/kdmp v0.4.1-0.20221112180956-71d263e21fb9 + github.com/portworx/kdmp v0.4.1-0.20221120133908-371b5c7190ff github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987 github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 github.com/portworx/torpedo v0.20.4-rc1.0.20210325154352-eb81b0cdd145 diff --git a/go.sum b/go.sum index 7cb7560b3c..448e85c04d 100644 --- a/go.sum +++ b/go.sum @@ -1428,6 +1428,8 @@ github.com/portworx/kdmp v0.4.1-0.20221110051828-bdbb3afc4dd0 h1:rIpCh+5iQCIQgpj github.com/portworx/kdmp v0.4.1-0.20221110051828-bdbb3afc4dd0/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= github.com/portworx/kdmp v0.4.1-0.20221112180956-71d263e21fb9 h1:I9LjqBwgka4Z3ny8sDioy3RfX4j5v5mLRVUS3H0f2D4= github.com/portworx/kdmp v0.4.1-0.20221112180956-71d263e21fb9/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= +github.com/portworx/kdmp v0.4.1-0.20221120133908-371b5c7190ff h1:KU7WhC+O8HsaFU1yZTb+8SCh7zUBfhQV2Qi4ar+OzNo= +github.com/portworx/kdmp v0.4.1-0.20221120133908-371b5c7190ff/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= github.com/portworx/kvdb v0.0.0-20190105022415-cccaa09abfc9/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200723230726-2734b7f40194/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200929023115-b312c7519467 h1:jkqzdbOnejgSN5HG/FLt4enNrozWT/K+nlmaRm3P1II= diff --git a/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go b/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go index 5257134fd3..f563fe64d5 100644 --- a/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go +++ b/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go @@ -540,7 +540,15 @@ func (c *Controller) createJobCredCertSecrets( } return data, err } - if len(pods) > 0 { + // filter out the pods that are create by us + count := len(pods) + for _, pod := range pods { + labels := pod.ObjectMeta.Labels + if _, ok := labels[drivers.DriverNameLabel]; ok { + count-- + } + } + if count > 0 { namespace = utils.AdminNamespace } blName = dataExport.Spec.Destination.Name diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackup.go b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackup.go index c21902b623..7bc3d6bc26 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackup.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackup.go @@ -448,7 +448,15 @@ func buildJob(jobName string, jobOptions drivers.JobOpts) (*batchv1.Job, error) } var resourceNamespace string var live bool - if len(pods) > 0 { + // filter out the pods that are create by us + count := len(pods) + for _, pod := range pods { + labels := pod.ObjectMeta.Labels + if _, ok := labels[drivers.DriverNameLabel]; ok { + count-- + } + } + if count > 0 { resourceNamespace = utils.AdminNamespace 
live = true } else { diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/common.go b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/common.go index f55cf5ca9f..ff8eb70f0f 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/common.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/common.go @@ -39,6 +39,14 @@ const ( KdmpConfigmapNamespace = "kube-system" // DefaultCompresion default compression type DefaultCompresion = "s2-parallel-8" + // DefaultQPS - default qps value for k8s apis + DefaultQPS = 100 + // DefaultBurst - default burst value for k8s apis + DefaultBurst = 100 + // QPSKey - configmap QPS key name + QPSKey = "K8S_QPS" + // BurstKey - configmap burst key name + BurstKey = "K8S_BURST" ) var ( diff --git a/vendor/modules.txt b/vendor/modules.txt index 90e6af564d..82588e39b4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -691,7 +691,7 @@ github.com/pierrec/lz4/internal/xxh32 github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 github.com/pmezard/go-difflib/difflib -# github.com/portworx/kdmp v0.4.1-0.20221112180956-71d263e21fb9 +# github.com/portworx/kdmp v0.4.1-0.20221120133908-371b5c7190ff ## explicit github.com/portworx/kdmp/pkg/apis/kdmp github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1 From dc5220e98c18c2387e3b0c622397ef064e24940a Mon Sep 17 00:00:00 2001 From: sivakumar subraani Date: Sat, 19 Nov 2022 02:05:30 +0000 Subject: [PATCH 89/97] pb-3312: Added steps to delete the resourceexport CRs when the restore fails due to a timeout. --- .../controllers/applicationrestore.go | 40 ++++++++++++------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/pkg/applicationmanager/controllers/applicationrestore.go b/pkg/applicationmanager/controllers/applicationrestore.go index 299cc93fd2..56ababbfdd 100644 --- a/pkg/applicationmanager/controllers/applicationrestore.go +++ b/pkg/applicationmanager/controllers/applicationrestore.go @@ -1477,7 +1477,7 @@ func (a *ApplicationRestoreController) restoreResources( } else { // Check whether ResourceExport is present or not crName := getResourceExportCRName(utils.PrefixRestore, string(restore.UID), restore.Namespace) - resourceExport, err := kdmpShedOps.Instance().GetResourceExport(crName, a.restoreAdminNamespace) + resourceExport, err := kdmpShedOps.Instance().GetResourceExport(crName, restore.Namespace) if err != nil { if k8s_errors.IsNotFound(err) { // create resource export CR @@ -1497,7 +1497,7 @@ func (a *ApplicationRestoreController) restoreResources( resourceExport.Annotations = make(map[string]string) resourceExport.Annotations[utils.SkipResourceAnnotation] = "true" resourceExport.Name = crName - resourceExport.Namespace = a.restoreAdminNamespace + resourceExport.Namespace = restore.Namespace resourceExport.Spec.Type = kdmpapi.ResourceExportBackup resourceExport.Spec.TriggeredFrom = kdmputils.TriggeredFromStork storkPodNs, err := k8sutils.GetStorkPodNamespace() if err != nil { logrus.Errorf("error in getting stork pod namespace: %v", err) return err } resourceExport.Spec.TriggeredFromNs = storkPodNs @@ -1695,6 +1695,19 @@ func (a *ApplicationRestoreController) cleanupRestore(restore *storkapi.Applicat return fmt.Errorf("cancel restore: %s", err) } } + var crNames = []string{} + // Directly calling DeleteResourceExport with out checking backuplocation type.
+ // For other backuplocation type, expecting Notfound + crNames = append(crNames, getResourceExportCRName(utils.PrefixRestore, string(restore.UID), restore.Namespace)) + crNames = append(crNames, getResourceExportCRName(utils.PrefixNFSRestorePVC, string(restore.UID), restore.Namespace)) + for _, crName := range crNames { + err := kdmpShedOps.Instance().DeleteResourceExport(crName, restore.Namespace) + if err != nil && !k8s_errors.IsNotFound(err) { + errMsg := fmt.Sprintf("failed to delete restore resource export CR [%v]: %v", crName, err) + log.ApplicationRestoreLog(restore).Errorf("%v", errMsg) + return err + } + } return nil } @@ -1737,21 +1750,18 @@ func (a *ApplicationRestoreController) cleanupResources(restore *storkapi.Applic logrus.Errorf("unable to cleanup post restore resources, err: %v", err) } } + var crNames = []string{} // Directly calling DeleteResourceExport with out checking backuplocation type. // For other backuplocation type, expecting Notfound - crName := getResourceExportCRName(utils.PrefixRestore, string(restore.UID), restore.Namespace) - err := kdmpShedOps.Instance().DeleteResourceExport(crName, a.restoreAdminNamespace) - if err != nil && !k8s_errors.IsNotFound(err) { - errMsg := fmt.Sprintf("failed to delete restore resource export CR [%v]: %v", crName, err) - log.ApplicationRestoreLog(restore).Errorf("%v", errMsg) - return err - } - crName = getResourceExportCRName(utils.PrefixNFSRestorePVC, string(restore.UID), restore.Namespace) - err = kdmpShedOps.Instance().DeleteResourceExport(crName, restore.Namespace) - if err != nil && !k8s_errors.IsNotFound(err) { - errMsg := fmt.Sprintf("failed to delete pvc creation resource export CR [%v]: %v", crName, err) - log.ApplicationRestoreLog(restore).Errorf("%v", errMsg) - return err + crNames = append(crNames, getResourceExportCRName(utils.PrefixRestore, string(restore.UID), restore.Namespace)) + crNames = append(crNames, getResourceExportCRName(utils.PrefixNFSRestorePVC, string(restore.UID), restore.Namespace)) + for _, crName := range crNames { + err := kdmpShedOps.Instance().DeleteResourceExport(crName, restore.Namespace) + if err != nil && !k8s_errors.IsNotFound(err) { + errMsg := fmt.Sprintf("failed to delete restore resource export CR [%v]: %v", crName, err) + log.ApplicationRestoreLog(restore).Errorf("%v", errMsg) + return err + } } return nil } From d538c1e99a07abcd94bfacba1c822254c81fce10 Mon Sep 17 00:00:00 2001 From: diptiranjan Date: Mon, 21 Nov 2022 09:52:14 +0530 Subject: [PATCH 90/97] PB-3316: Removing both the storageprovisioner annotation and its beta variant while restoring PVCs.
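Kubernetes can record the provisioner under both the GA and the legacy beta annotation keys, so the restore flow now clears both. A short sketch of the intent follows; the literal key values in the comments are the upstream Kubernetes defaults and are noted here only for reference:

    // Drop both provisioner annotations so the restored PVC is not
    // handed back to the original provisioner.
    delete(pvc.Annotations, k8shelper.AnnBetaStorageProvisioner) // volume.beta.kubernetes.io/storage-provisioner
    delete(pvc.Annotations, k8shelper.AnnStorageProvisioner)     // volume.kubernetes.io/storage-provisioner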
--- drivers/volume/kdmp/kdmp.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/volume/kdmp/kdmp.go b/drivers/volume/kdmp/kdmp.go index d19143afc0..2ea3c832e7 100644 --- a/drivers/volume/kdmp/kdmp.go +++ b/drivers/volume/kdmp/kdmp.go @@ -614,7 +614,8 @@ func (k *kdmp) getRestorePVCs( delete(pvc.Annotations, bindCompletedKey) delete(pvc.Annotations, boundByControllerKey) delete(pvc.Annotations, storageClassKey) - delete(pvc.Annotations, storageProvisioner) + delete(pvc.Annotations, k8shelper.AnnBetaStorageProvisioner) + delete(pvc.Annotations, k8shelper.AnnStorageProvisioner) delete(pvc.Annotations, storageNodeAnnotation) pvc.Annotations[KdmpAnnotation] = StorkAnnotation } From d4a7af9fdebd52e71353602ec2d2efa2b38b3a12 Mon Sep 17 00:00:00 2001 From: Kesavan Thiruvenkadasamy Date: Wed, 16 Nov 2022 12:32:46 +0530 Subject: [PATCH 91/97] fixed error message in restore Signed-off-by: Kesavan Thiruvenkadasamy --- pkg/applicationmanager/controllers/applicationrestore.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/applicationmanager/controllers/applicationrestore.go b/pkg/applicationmanager/controllers/applicationrestore.go index 56ababbfdd..6def2c50e3 100644 --- a/pkg/applicationmanager/controllers/applicationrestore.go +++ b/pkg/applicationmanager/controllers/applicationrestore.go @@ -713,7 +713,7 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat logrus.Infof("%s re cr %v status %v", funct, crName, resourceExport.Status.Status) switch resourceExport.Status.Status { case kdmpapi.ResourceExportStatusFailed: - message = fmt.Sprintf("%s Error creating CR %v for pvc creation: %v", funct, crName, err) + message = fmt.Sprintf("%s Error creating CR %v for pvc creation: %v", funct, crName, resourceExport.Status.Reason) restore.Status.Status = storkapi.ApplicationRestoreStatusFailed restore.Status.Stage = storkapi.ApplicationRestoreStageFinal restore.Status.Reason = message From 6f6ce846e3b3ae94c271f1ab030d5d56e8511fa2 Mon Sep 17 00:00:00 2001 From: Lalatendu Das Date: Wed, 23 Nov 2022 14:20:13 +0000 Subject: [PATCH 92/97] pb-3320: Enforce kdmp backup for all cloud provisioners for NFS BL For AWS, Azure and GCP based volumes we will use the kdmp driver to take a backup only when the backuplocation type is NFS. Signed-off-by: Lalatendu Das --- drivers/volume/aws/aws.go | 5 +++++ drivers/volume/azure/azure.go | 5 +++++ drivers/volume/gcp/gcp.go | 5 +++++ 3 files changed, 15 insertions(+) diff --git a/drivers/volume/aws/aws.go b/drivers/volume/aws/aws.go index 78331013b0..e5c46b673a 100644 --- a/drivers/volume/aws/aws.go +++ b/drivers/volume/aws/aws.go @@ -122,6 +122,11 @@ func (a *aws) OwnsPVCForBackup( // If user has forced the backupType in config map, default to generic always return false } + // For AWS volume and backuplocation type is NFS, we will not own. + // It will default to kdmp + if blType == storkapi.BackupLocationNFS { + return false + } return a.OwnsPVC(coreOps, pvc) } diff --git a/drivers/volume/azure/azure.go b/drivers/volume/azure/azure.go index 2d14041e7f..72d50f214d 100644 --- a/drivers/volume/azure/azure.go +++ b/drivers/volume/azure/azure.go @@ -163,6 +163,11 @@ func (a *azure) OwnsPVCForBackup( // If user has forced the backupType in config map, default to generic always return false } + // For Azure based volume and backuplocation type is NFS, we will not own.
+ // It will default to kdmp + if blType == storkapi.BackupLocationNFS { + return false + } return a.OwnsPVC(coreOps, pvc) } diff --git a/drivers/volume/gcp/gcp.go b/drivers/volume/gcp/gcp.go index 4101785599..3f1e4179e5 100644 --- a/drivers/volume/gcp/gcp.go +++ b/drivers/volume/gcp/gcp.go @@ -104,6 +104,11 @@ func (g *gcp) OwnsPVCForBackup( // If user has forced the backupType in config map, default to generic always return false } + // For gcp volume and backuplocation type is NFS, we will not own. + // It will default to kdmp + if blType == storkapi.BackupLocationNFS { + return false + } return g.OwnsPVC(coreOps, pvc) } From 82fd21d9b1c81faf29a43a13fcd45530ee8ec850 Mon Sep 17 00:00:00 2001 From: sivakumar subraani Date: Thu, 24 Nov 2022 08:34:53 +0000 Subject: [PATCH 93/97] vendor latest changes from 1.2.3 kdmp branch --- go.mod | 2 +- go.sum | 2 ++ vendor/github.com/portworx/kdmp/pkg/drivers/drivers.go | 2 +- vendor/modules.txt | 2 +- 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7a8be56cb5..624e3105ab 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/openshift/api v0.0.0-20210105115604-44119421ec6b github.com/openshift/client-go v0.0.0-20210112165513-ebc401615f47 github.com/pborman/uuid v1.2.0 - github.com/portworx/kdmp v0.4.1-0.20221120133908-371b5c7190ff + github.com/portworx/kdmp v0.4.1-0.20221123171404-53a7660f5795 github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987 github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 github.com/portworx/torpedo v0.20.4-rc1.0.20210325154352-eb81b0cdd145 diff --git a/go.sum b/go.sum index 448e85c04d..ece4bba813 100644 --- a/go.sum +++ b/go.sum @@ -1430,6 +1430,8 @@ github.com/portworx/kdmp v0.4.1-0.20221112180956-71d263e21fb9 h1:I9LjqBwgka4Z3ny github.com/portworx/kdmp v0.4.1-0.20221112180956-71d263e21fb9/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= github.com/portworx/kdmp v0.4.1-0.20221120133908-371b5c7190ff h1:KU7WhC+O8HsaFU1yZTb+8SCh7zUBfhQV2Qi4ar+OzNo= github.com/portworx/kdmp v0.4.1-0.20221120133908-371b5c7190ff/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= +github.com/portworx/kdmp v0.4.1-0.20221123171404-53a7660f5795 h1:JkUI/gVsYM3+z4QFtgaiicrQ/kwnSxBJGDQFPUAC7Go= +github.com/portworx/kdmp v0.4.1-0.20221123171404-53a7660f5795/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= github.com/portworx/kvdb v0.0.0-20190105022415-cccaa09abfc9/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200723230726-2734b7f40194/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200929023115-b312c7519467 h1:jkqzdbOnejgSN5HG/FLt4enNrozWT/K+nlmaRm3P1II= diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/drivers.go b/vendor/github.com/portworx/kdmp/pkg/drivers/drivers.go index e4f4a25271..1d33e2b996 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/drivers.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/drivers.go @@ -51,7 +51,7 @@ const ( CertFileName = "public.crt" CertSecretName = "tls-s3-cert" CertMount = "/etc/tls-s3-cert" - NfsMount = "/tmp/nfs-target/" + NfsMount = "/mnt/nfs-target/" ) // Driver job options. 
diff --git a/vendor/modules.txt b/vendor/modules.txt index 82588e39b4..1b7a27dc97 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -691,7 +691,7 @@ github.com/pierrec/lz4/internal/xxh32 github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 github.com/pmezard/go-difflib/difflib -# github.com/portworx/kdmp v0.4.1-0.20221120133908-371b5c7190ff +# github.com/portworx/kdmp v0.4.1-0.20221123171404-53a7660f5795 ## explicit github.com/portworx/kdmp/pkg/apis/kdmp github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1 From 5883fa178a59410c325075cfd29eba609102a065 Mon Sep 17 00:00:00 2001 From: Lalatendu Das Date: Wed, 30 Nov 2022 04:45:22 +0000 Subject: [PATCH 94/97] pb-3347: Make CSI volumes use the kdmp driver for NFS BL When the backuplocation type is NFS, we will use the KDMP driver to take backups of CSI volumes. Signed-off-by: Lalatendu Das --- drivers/volume/csi/csi.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/volume/csi/csi.go b/drivers/volume/csi/csi.go index 5d6fbb0846..9060ca7569 100644 --- a/drivers/volume/csi/csi.go +++ b/drivers/volume/csi/csi.go @@ -311,6 +311,10 @@ func (c *csi) OwnsPVCForBackup( crBackupType string, blType storkapi.BackupLocationType, ) bool { + // For CSI volume and backuplocation type is NFS, It will default to kdmp + if blType == storkapi.BackupLocationNFS { + return false + } if cmBackupType == storkapi.ApplicationBackupGeneric || crBackupType == storkapi.ApplicationBackupGeneric { // If user has forced the backupType in config map or applicationbackup CR, default to generic always return false From 29d09ff79b9432e373e01f45de71b2b299aac06d Mon Sep 17 00:00:00 2001 From: sivakumar subraani Date: Tue, 23 May 2023 12:04:28 +0000 Subject: [PATCH 95/97] vendored latest kdmp from 1.2.3 branch --- go.mod | 2 +- go.sum | 10 +- .../pkg/controllers/dataexport/reconcile.go | 13 +- .../portworx/kdmp/pkg/drivers/drivers.go | 2 +- .../pkg/drivers/kopiabackup/kopiabackup.go | 10 +- .../portworx/kdmp/pkg/drivers/utils/common.go | 142 +++++++++++------- .../portworx/kdmp/pkg/drivers/utils/utils.go | 4 +- vendor/modules.txt | 2 +- 8 files changed, 95 insertions(+), 90 deletions(-) diff --git a/go.mod b/go.mod index 624e3105ab..da2d697749 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/openshift/api v0.0.0-20210105115604-44119421ec6b github.com/openshift/client-go v0.0.0-20210112165513-ebc401615f47 github.com/pborman/uuid v1.2.0 - github.com/portworx/kdmp v0.4.1-0.20221123171404-53a7660f5795 + github.com/portworx/kdmp v0.4.1-0.20230523115608-d3dc39fab560 github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987 github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 github.com/portworx/torpedo v0.20.4-rc1.0.20210325154352-eb81b0cdd145 diff --git a/go.sum b/go.sum index ece4bba813..f0c0c2277c 100644 --- a/go.sum +++ b/go.sum @@ -1424,14 +1424,8 @@ github.com/portworx/kdmp v0.4.1-0.20220414053457-962507678379/go.mod h1:EAVroITf github.com/portworx/kdmp v0.4.1-0.20220710173715-5d42efc7d149/go.mod h1:nb5AupP/63ByyqAYfZ+E32LDEnP0PjgH6w+yKXxWIgE= github.com/portworx/kdmp v0.4.1-0.20220902105026-dc14791e1508/go.mod h1:sTO9LkPkExEVE6BqowIzkrQsyBtGdaC4Vh1AcKQ4xZA= github.com/portworx/kdmp v0.4.1-0.20220905153748-e0bb69e59f38/go.mod h1:NI2UgLITtggRvvaRA7lE4+Np4htDp+06Jf1LWksKyGA= -github.com/portworx/kdmp v0.4.1-0.20221110051828-bdbb3afc4dd0 h1:rIpCh+5iQCIQgpj
-github.com/portworx/kdmp v0.4.1-0.20221112180956-71d263e21fb9 h1:I9LjqBwgka4Z3ny8sDioy3RfX4j5v5mLRVUS3H0f2D4= -github.com/portworx/kdmp v0.4.1-0.20221112180956-71d263e21fb9/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= -github.com/portworx/kdmp v0.4.1-0.20221120133908-371b5c7190ff h1:KU7WhC+O8HsaFU1yZTb+8SCh7zUBfhQV2Qi4ar+OzNo= -github.com/portworx/kdmp v0.4.1-0.20221120133908-371b5c7190ff/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= -github.com/portworx/kdmp v0.4.1-0.20221123171404-53a7660f5795 h1:JkUI/gVsYM3+z4QFtgaiicrQ/kwnSxBJGDQFPUAC7Go= -github.com/portworx/kdmp v0.4.1-0.20221123171404-53a7660f5795/go.mod h1:v8uQbjGe6UFNyrZ+vFKgEu30wsaTc1qg0OYOKmVmUOE= +github.com/portworx/kdmp v0.4.1-0.20230523115608-d3dc39fab560 h1:VfFUh5ZwZ+4xhDEuG2Rh/p9Rm+9mXicRuVd4j7nNIEw= +github.com/portworx/kdmp v0.4.1-0.20230523115608-d3dc39fab560/go.mod h1:6XtJRBuPdSrnKuPD2vKLsVHbkKpF/5M/N2mAKP5hnqw= github.com/portworx/kvdb v0.0.0-20190105022415-cccaa09abfc9/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200723230726-2734b7f40194/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200929023115-b312c7519467 h1:jkqzdbOnejgSN5HG/FLt4enNrozWT/K+nlmaRm3P1II= diff --git a/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go b/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go index f563fe64d5..87658ab457 100644 --- a/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go +++ b/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go @@ -3,7 +3,6 @@ package dataexport import ( "context" "fmt" - "io/ioutil" "os" "path/filepath" "reflect" @@ -540,15 +539,7 @@ func (c *Controller) createJobCredCertSecrets( } return data, err } - // filter out the pods that are create by us - count := len(pods) - for _, pod := range pods { - labels := pod.ObjectMeta.Labels - if _, ok := labels[drivers.DriverNameLabel]; ok { - count-- - } - } - if count > 0 { + if len(pods) > 0 { namespace = utils.AdminNamespace } blName = dataExport.Spec.Destination.Name @@ -2043,7 +2034,7 @@ func createAzureSecret(secretName string, backupLocation *storkapi.BackupLocatio func createCertificateSecret(secretName, namespace string, labels map[string]string) error { drivers.CertFilePath = os.Getenv(drivers.CertDirPath) if drivers.CertFilePath != "" { - certificateData, err := ioutil.ReadFile(filepath.Join(drivers.CertFilePath, drivers.CertFileName)) + certificateData, err := os.ReadFile(filepath.Join(drivers.CertFilePath, drivers.CertFileName)) if err != nil { errMsg := fmt.Sprintf("failed reading data from file %s : %s", drivers.CertFilePath, err) logrus.Errorf("%v", errMsg) diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/drivers.go b/vendor/github.com/portworx/kdmp/pkg/drivers/drivers.go index 1d33e2b996..e4f4a25271 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/drivers.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/drivers.go @@ -51,7 +51,7 @@ const ( CertFileName = "public.crt" CertSecretName = "tls-s3-cert" CertMount = "/etc/tls-s3-cert" - NfsMount = "/mnt/nfs-target/" + NfsMount = "/tmp/nfs-target/" ) // Driver job options. 
diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackup.go b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackup.go index 7bc3d6bc26..c21902b623 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackup.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackup.go @@ -448,15 +448,7 @@ func buildJob(jobName string, jobOptions drivers.JobOpts) (*batchv1.Job, error) } var resourceNamespace string var live bool - // filter out the pods that are create by us - count := len(pods) - for _, pod := range pods { - labels := pod.ObjectMeta.Labels - if _, ok := labels[drivers.DriverNameLabel]; ok { - count-- - } - } - if count > 0 { + if len(pods) > 0 { resourceNamespace = utils.AdminNamespace live = true } else { diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/common.go b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/common.go index ff8eb70f0f..b3990c9b48 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/common.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/common.go @@ -4,6 +4,8 @@ import ( "fmt" "time" + version "github.com/hashicorp/go-version" + storkversion "github.com/libopenstorage/stork/pkg/version" coreops "github.com/portworx/sched-ops/k8s/core" rbacops "github.com/portworx/sched-ops/k8s/rbac" "github.com/portworx/sched-ops/task" @@ -46,7 +48,8 @@ const ( // QPSKey - configmap QPS key name QPSKey = "K8S_QPS" // BurstKey - configmap burst key name - BurstKey = "K8S_BURST" + BurstKey = "K8S_BURST" + k8sMinVersionSASecretTokenNotSupport = "1.24" ) var ( @@ -54,6 +57,23 @@ var ( JobPodBackOffLimit = int32(10) ) +// isServiceAccountSecretMissing returns true, if the K8s version does not support secret token for the service account. +func isServiceAccountSecretMissing() (bool, error) { + k8sVersion, _, err := storkversion.GetFullVersion() + if err != nil { + return false, err + } + VersionTokenNotSupported, err := version.NewVersion(k8sMinVersionSASecretTokenNotSupport) + if err != nil { + return false, err + + } + if k8sVersion.GreaterThanOrEqual(VersionTokenNotSupported) { + return true, nil + } + return false, nil +} + // SetupServiceAccount create a service account and bind it to a provided role. 
diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/common.go b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/common.go
index ff8eb70f0f..b3990c9b48 100644
--- a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/common.go
+++ b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/common.go
@@ -4,6 +4,8 @@ import (
 	"fmt"
 	"time"
 
+	version "github.com/hashicorp/go-version"
+	storkversion "github.com/libopenstorage/stork/pkg/version"
 	coreops "github.com/portworx/sched-ops/k8s/core"
 	rbacops "github.com/portworx/sched-ops/k8s/rbac"
 	"github.com/portworx/sched-ops/task"
@@ -46,7 +48,8 @@ const (
 	// QPSKey - configmap QPS key name
 	QPSKey = "K8S_QPS"
 	// BurstKey - configmap burst key name
-	BurstKey = "K8S_BURST"
+	BurstKey                             = "K8S_BURST"
+	k8sMinVersionSASecretTokenNotSupport = "1.24"
 )
 
 var (
@@ -54,6 +57,23 @@ var (
 	JobPodBackOffLimit = int32(10)
 )
 
+// isServiceAccountSecretMissing returns true, if the K8s version does not support secret token for the service account.
+func isServiceAccountSecretMissing() (bool, error) {
+	k8sVersion, _, err := storkversion.GetFullVersion()
+	if err != nil {
+		return false, err
+	}
+	VersionTokenNotSupported, err := version.NewVersion(k8sMinVersionSASecretTokenNotSupport)
+	if err != nil {
+		return false, err
+
+	}
+	if k8sVersion.GreaterThanOrEqual(VersionTokenNotSupported) {
+		return true, nil
+	}
+	return false, nil
+}
+
 // SetupServiceAccount create a service account and bind it to a provided role.
 func SetupServiceAccount(name, namespace string, role *rbacv1.Role) error {
 	if role != nil {
@@ -73,35 +93,39 @@ func SetupServiceAccount(name, namespace string, role *rbacv1.Role) error {
 	if sa, err = coreops.Instance().CreateServiceAccount(serviceAccountFor(name, namespace)); err != nil && !errors.IsAlreadyExists(err) {
 		return fmt.Errorf("create %s/%s serviceaccount: %s", namespace, name, err)
 	}
-	t := func() (interface{}, bool, error) {
-		sa, err = coreops.Instance().GetServiceAccount(name, namespace)
-		if err != nil {
-			errMsg := fmt.Sprintf("failed fetching sa [%v/%v]: %v", name, namespace, err)
-			logrus.Tracef("%v", errMsg)
-			return "", true, fmt.Errorf("%v", errMsg)
+	// From 1.24.0 onwards service token does not support default secret token
+	tokenSupported, err := isServiceAccountSecretMissing()
+	if !tokenSupported {
+		t := func() (interface{}, bool, error) {
+			sa, err = coreops.Instance().GetServiceAccount(name, namespace)
+			if err != nil {
+				errMsg := fmt.Sprintf("failed fetching sa [%v/%v]: %v", name, namespace, err)
+				logrus.Tracef("%v", errMsg)
+				return "", true, fmt.Errorf("%v", errMsg)
+			}
+			if sa.Secrets == nil {
+				errMsg := fmt.Sprintf("secret token is missing in sa [%v/%v]", name, namespace)
+				return "", true, fmt.Errorf("%v", errMsg)
+			}
+			return "", false, nil
 		}
-		if sa.Secrets == nil {
-			errMsg := fmt.Sprintf("secret token is missing in sa [%v/%v]", name, namespace)
-			return "", true, fmt.Errorf("%v", errMsg)
+		if _, err := task.DoRetryWithTimeout(t, DefaultTimeout, ProgressCheckInterval); err != nil {
+			errMsg := fmt.Sprintf("max retries done, failed in fetching secret token of sa [%v/%v]: %v ", name, namespace, err)
+			logrus.Errorf("%v", errMsg)
+			// Exhausted all retries
+			return err
 		}
-		return "", false, nil
-	}
-	if _, err := task.DoRetryWithTimeout(t, DefaultTimeout, ProgressCheckInterval); err != nil {
-		errMsg := fmt.Sprintf("max retries done, failed in fetching secret token of sa [%v/%v]: %v ", name, namespace, err)
-		logrus.Errorf("%v", errMsg)
-		// Exhausted all retries
-		return err
-	}
-	tokenName := sa.Secrets[0].Name
-	secretToken, err := coreops.Instance().GetSecret(tokenName, namespace)
-	if err != nil {
-		return fmt.Errorf("failed in getting secretToken [%v] of service account [%v/%v]: %v", tokenName, name, namespace, err)
-	}
-	secretToken.Annotations[SkipResourceAnnotation] = "true"
-	_, err = coreops.Instance().UpdateSecret(secretToken)
-	if err != nil {
-		return fmt.Errorf("failed in updating the secretToken [%v] of service account [%v/%v]: %v", tokenName, name, namespace, err)
+		tokenName := sa.Secrets[0].Name
+		secretToken, err := coreops.Instance().GetSecret(tokenName, namespace)
+		if err != nil {
+			return fmt.Errorf("failed in getting secretToken [%v] of service account [%v/%v]: %v", tokenName, name, namespace, err)
+		}
+		secretToken.Annotations[SkipResourceAnnotation] = "true"
+		_, err = coreops.Instance().UpdateSecret(secretToken)
+		if err != nil {
+			return fmt.Errorf("failed in updating the secretToken [%v] of service account [%v/%v]: %v", tokenName, name, namespace, err)
+		}
 	}
 	return nil
 }
@@ -140,40 +164,44 @@ func SetupNFSServiceAccount(name, namespace string, role *rbacv1.ClusterRole) er
 		return fmt.Errorf("create %s/%s serviceaccount: %s", namespace, name, err)
 	}
 	var errMsg error
-	t := func() (interface{}, bool, error) {
-		sa, err = coreops.Instance().GetServiceAccount(name, namespace)
+	// From 1.24.0 onwards service token does not support default secret token
+	tokenSupported, err := isServiceAccountSecretMissing()
+	if !tokenSupported {
+		t := func() (interface{}, bool, error) {
+			sa, err = coreops.Instance().GetServiceAccount(name, namespace)
+			if err != nil {
+				errMsg = fmt.Errorf("failed fetching sa [%v/%v]: %v", name, namespace, err)
+				logrus.Errorf("%v", errMsg)
+				return "", true, fmt.Errorf("%v", errMsg)
+			}
+			if sa.Secrets == nil {
+				logrus.Infof("Returned sa-secret null")
+				errMsg = fmt.Errorf("secret token is missing in sa [%v/%v]", name, namespace)
+				return "", true, fmt.Errorf("%v", errMsg)
+			}
+			return "", false, nil
+		}
+		if _, err := task.DoRetryWithTimeout(t, DefaultTimeout, ProgressCheckInterval); err != nil {
+			eMsg := fmt.Errorf("max retries done, failed in fetching secret token of sa [%v/%v]: %v ", name, namespace, errMsg)
+			logrus.Errorf("%v", eMsg)
+			// Exhausted all retries
+			return eMsg
+		}
+
+		tokenName := sa.Secrets[0].Name
+		secretToken, err := coreops.Instance().GetSecret(tokenName, namespace)
 		if err != nil {
-			errMsg = fmt.Errorf("failed fetching sa [%v/%v]: %v", name, namespace, err)
+			errMsg := fmt.Errorf("failed in getting secretToken [%v] of service account [%v/%v]: %v", tokenName, name, namespace, err)
 			logrus.Errorf("%v", errMsg)
-			return "", true, fmt.Errorf("%v", errMsg)
+			return errMsg
 		}
-		if sa.Secrets == nil {
-			logrus.Infof("Returned sa-secret null")
-			errMsg = fmt.Errorf("secret token is missing in sa [%v/%v]", name, namespace)
+		secretToken.Annotations[SkipResourceAnnotation] = "true"
+		_, err = coreops.Instance().UpdateSecret(secretToken)
+		if err != nil {
+			errMsg := fmt.Errorf("failed in updating the secretToken [%v] of service account [%v/%v]: %v", tokenName, name, namespace, err)
+			logrus.Errorf("%v", errMsg)
+			return errMsg
 		}
-		return "", false, nil
-	}
-	if _, err := task.DoRetryWithTimeout(t, DefaultTimeout, ProgressCheckInterval); err != nil {
-		eMsg := fmt.Errorf("max retries done, failed in fetching secret token of sa [%v/%v]: %v ", name, namespace, errMsg)
-		logrus.Errorf("%v", eMsg)
-		// Exhausted all retries
-		return eMsg
-	}
-
-	tokenName := sa.Secrets[0].Name
-	secretToken, err := coreops.Instance().GetSecret(tokenName, namespace)
-	if err != nil {
-		errMsg := fmt.Errorf("failed in getting secretToken [%v] of service account [%v/%v]: %v", tokenName, name, namespace, err)
-		logrus.Errorf("%v", errMsg)
-		return errMsg
-	}
-	secretToken.Annotations[SkipResourceAnnotation] = "true"
-	_, err = coreops.Instance().UpdateSecret(secretToken)
-	if err != nil {
-		errMsg := fmt.Errorf("failed in updating the secretToken [%v] of service account [%v/%v]: %v", tokenName, name, namespace, err)
-		logrus.Errorf("%v", errMsg)
-		return errMsg
 	}
 	return nil
 }
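The common.go change above exists because Kubernetes 1.24 (LegacyServiceAccountTokenNoAutoGeneration, enabled by default) stopped auto-creating a token Secret for every ServiceAccount, so on such clusters sa.Secrets never fills in and the old wait loop could only time out. Note the guard's polarity: isServiceAccountSecretMissing returns true on 1.24+, so `if !tokenSupported` skips the secret wait exactly there, while the function's error return is left unchecked. The comparison relies on github.com/hashicorp/go-version; a self-contained sketch of those semantics (a standalone illustration, not the vendored code):

    package main

    import (
        "fmt"

        version "github.com/hashicorp/go-version"
    )

    func main() {
        cutoff, _ := version.NewVersion("1.24")
        for _, raw := range []string{"1.21.14", "1.24.0", "1.27.1"} {
            v, err := version.NewVersion(raw)
            if err != nil {
                fmt.Println("parse error:", err)
                continue
            }
            // On >= 1.24 clusters the ServiceAccount token Secret never
            // appears, so the wait-for-secret path must be skipped.
            fmt.Printf("%s >= %s: %v\n", v, cutoff, v.GreaterThanOrEqual(cutoff))
        }
    }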
diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go
index ac75174f25..07e9bae4e5 100644
--- a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go
+++ b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go
@@ -815,12 +815,12 @@ func WaitForPVAvailable(pvName string) (*corev1.PersistentVolume, error) {
 	return pv, nil
 }
 
-//GetPvcNameForJob - returns the PVC name for a job
+// GetPvcNameForJob - returns the PVC name for a job
 func GetPvcNameForJob(jobName string) string {
 	return "pvc-" + jobName
 }
 
-//GetPvNameForJob - returns pv name for a job
+// GetPvNameForJob - returns pv name for a job
 func GetPvNameForJob(jobName string) string {
 	return "pv-" + jobName
 }
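The kdmp bumps recorded below use Go module pseudo-versions: a string such as v0.4.1-0.20230523125352-aae43abbbd80 encodes a base version, a UTC commit timestamp, and a 12-character commit hash, and is what `go get github.com/portworx/kdmp@<commit>` followed by `go mod vendor` writes into go.mod, go.sum, and vendor/modules.txt. A sketch that rebuilds the string by hand; the helper name is hypothetical and the "next patch after the preceding tag" convention is the toolchain's, simplified here:

    package main

    import (
        "fmt"
        "time"
    )

    // pseudoVersion mimics the Go toolchain's vX.Y.Z-0.timestamp-hash form for
    // a commit that follows the release tag vX.Y.(Z-1); illustrative only.
    func pseudoVersion(nextPatch string, commitTime time.Time, shortHash string) string {
        return fmt.Sprintf("%s-0.%s-%s",
            nextPatch, commitTime.UTC().Format("20060102150405"), shortHash)
    }

    func main() {
        when := time.Date(2023, 5, 23, 12, 53, 52, 0, time.UTC)
        fmt.Println(pseudoVersion("v0.4.1", when, "aae43abbbd80"))
        // Output: v0.4.1-0.20230523125352-aae43abbbd80
    }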
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 1b7a27dc97..1019fb5513 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -691,7 +691,7 @@ github.com/pierrec/lz4/internal/xxh32
 github.com/pkg/errors
 # github.com/pmezard/go-difflib v1.0.0
 github.com/pmezard/go-difflib/difflib
-# github.com/portworx/kdmp v0.4.1-0.20221123171404-53a7660f5795
+# github.com/portworx/kdmp v0.4.1-0.20230523115608-d3dc39fab560
 ## explicit
 github.com/portworx/kdmp/pkg/apis/kdmp
 github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1

From ae34027389d8fb1035bf58a8a889b5c21f6c9a88 Mon Sep 17 00:00:00 2001
From: sivakumar subraani
Date: Tue, 23 May 2023 13:23:57 +0000
Subject: [PATCH 96/97] updating kdmp version from 1.2.3 to 1.2.3-dev

---
 go.mod                                                 | 2 +-
 go.sum                                                 | 2 ++
 vendor/github.com/portworx/kdmp/pkg/version/version.go | 2 +-
 vendor/modules.txt                                     | 2 +-
 4 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/go.mod b/go.mod
index da2d697749..8ce6af6402 100644
--- a/go.mod
+++ b/go.mod
@@ -27,7 +27,7 @@ require (
 	github.com/openshift/api v0.0.0-20210105115604-44119421ec6b
 	github.com/openshift/client-go v0.0.0-20210112165513-ebc401615f47
 	github.com/pborman/uuid v1.2.0
-	github.com/portworx/kdmp v0.4.1-0.20230523115608-d3dc39fab560
+	github.com/portworx/kdmp v0.4.1-0.20230523125352-aae43abbbd80
 	github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987
 	github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8
 	github.com/portworx/torpedo v0.20.4-rc1.0.20210325154352-eb81b0cdd145
diff --git a/go.sum b/go.sum
index f0c0c2277c..fc2b6f26ec 100644
--- a/go.sum
+++ b/go.sum
@@ -1426,6 +1426,8 @@ github.com/portworx/kdmp v0.4.1-0.20220902105026-dc14791e1508/go.mod h1:sTO9LkPk
 github.com/portworx/kdmp v0.4.1-0.20220905153748-e0bb69e59f38/go.mod h1:NI2UgLITtggRvvaRA7lE4+Np4htDp+06Jf1LWksKyGA=
 github.com/portworx/kdmp v0.4.1-0.20230523115608-d3dc39fab560 h1:VfFUh5ZwZ+4xhDEuG2Rh/p9Rm+9mXicRuVd4j7nNIEw=
 github.com/portworx/kdmp v0.4.1-0.20230523115608-d3dc39fab560/go.mod h1:6XtJRBuPdSrnKuPD2vKLsVHbkKpF/5M/N2mAKP5hnqw=
+github.com/portworx/kdmp v0.4.1-0.20230523125352-aae43abbbd80 h1:XxGZ/txfyifpEUXb5ecMakrcZfYjzPHHHqS00VdRdww=
+github.com/portworx/kdmp v0.4.1-0.20230523125352-aae43abbbd80/go.mod h1:6XtJRBuPdSrnKuPD2vKLsVHbkKpF/5M/N2mAKP5hnqw=
 github.com/portworx/kvdb v0.0.0-20190105022415-cccaa09abfc9/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M=
 github.com/portworx/kvdb v0.0.0-20200723230726-2734b7f40194/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M=
 github.com/portworx/kvdb v0.0.0-20200929023115-b312c7519467 h1:jkqzdbOnejgSN5HG/FLt4enNrozWT/K+nlmaRm3P1II=
diff --git a/vendor/github.com/portworx/kdmp/pkg/version/version.go b/vendor/github.com/portworx/kdmp/pkg/version/version.go
index 0d0e7bc0c2..e488487015 100644
--- a/vendor/github.com/portworx/kdmp/pkg/version/version.go
+++ b/vendor/github.com/portworx/kdmp/pkg/version/version.go
@@ -24,7 +24,7 @@ const (
 //
 // These variables typically come from -ldflags settings.
 var (
-	gitVersion = "1.2.3"
+	gitVersion = "1.2.3-dev"
 	gitCommit  = ""                     // sha1 from git, output of $(git rev-parse HEAD)
 	buildDate  = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
 	kbVerRegex = regexp.MustCompile(`^(v\d+\.\d+\.\d+)(.*)`)
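The gitVersion default patched above only matters when nothing is injected at build time; as the surrounding comments note, these strings normally come from -ldflags. Go's -X flag can set package-level string variables, including unexported ones, so a release build might pass something like the following. The package path is the real one from the diff; the exact invocation is illustrative:

    go build -ldflags "-X github.com/portworx/kdmp/pkg/version.gitVersion=1.2.3-dev \
        -X github.com/portworx/kdmp/pkg/version.gitCommit=$(git rev-parse HEAD) \
        -X github.com/portworx/kdmp/pkg/version.buildDate=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" ./...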
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 1019fb5513..a72da997b5 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -691,7 +691,7 @@ github.com/pierrec/lz4/internal/xxh32
 github.com/pkg/errors
 # github.com/pmezard/go-difflib v1.0.0
 github.com/pmezard/go-difflib/difflib
-# github.com/portworx/kdmp v0.4.1-0.20230523115608-d3dc39fab560
+# github.com/portworx/kdmp v0.4.1-0.20230523125352-aae43abbbd80
 ## explicit
 github.com/portworx/kdmp/pkg/apis/kdmp
 github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1

From b57a1c2f030846e837520ecdc8e108b3339ef829 Mon Sep 17 00:00:00 2001
From: sivakumar subraani
Date: Tue, 23 May 2023 13:57:32 +0000
Subject: [PATCH 97/97] formatting changes

---
 pkg/apis/stork/v1alpha1/resourcetransformation.go | 2 +-
 test/integration_test/migration_test.go           | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/pkg/apis/stork/v1alpha1/resourcetransformation.go b/pkg/apis/stork/v1alpha1/resourcetransformation.go
index b6abb33431..0dd7c06ec0 100644
--- a/pkg/apis/stork/v1alpha1/resourcetransformation.go
+++ b/pkg/apis/stork/v1alpha1/resourcetransformation.go
@@ -96,7 +96,7 @@ type TransformResourceInfo struct {
 }
 
 // ResourceTransformationSpec is used to update k8s resources
-//before migration/restore
+// before migration/restore
 type ResourceTransformationSpec struct {
 	Objects []TransformSpecs `json:"transformSpecs"`
 }
diff --git a/test/integration_test/migration_test.go b/test/integration_test/migration_test.go
index bde0b911be..de47a4249b 100644
--- a/test/integration_test/migration_test.go
+++ b/test/integration_test/migration_test.go
@@ -160,7 +160,6 @@ func triggerMigration(
 	return ctxs, preMigrationCtx
 }
 
-//
 // validateMigrationSummary validats the migration summary
 // currently we don't have an automated way to find out how many resources got deployed
 // through torpedo specs. For ex. a statefulset can have an inline PVC and that should